Skip to content

Commit 3c7e378

Browse files
authored
Merge pull request FoundationAgents#797 from fred913/main
Several improvements
2 parents b7dcbfe + b9df45b commit 3c7e378

8 files changed

Lines changed: 126 additions & 82 deletions

File tree

.github/ISSUE_TEMPLATE/request_new_features.md

Lines changed: 0 additions & 14 deletions
This file was deleted.
Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
name: "🤔 Request new features"
2+
description: Suggest ideas or features you’d like to see implemented in OpenManus.
3+
labels: enhancement
4+
body:
5+
- type: textarea
6+
id: feature-description
7+
attributes:
8+
label: Feature description
9+
description: |
10+
Provide a clear and concise description of the proposed feature
11+
validations:
12+
required: true
13+
- type: textarea
14+
id: your-feature
15+
attributes:
16+
label: Your Feature
17+
description: |
18+
Explain your idea or implementation process, if any. Optionally, include a Pull Request URL.
19+
Ensure accompanying docs/tests/examples are provided for review.
20+
validations:
21+
required: false

.github/ISSUE_TEMPLATE/show_me_the_bug.md

Lines changed: 0 additions & 25 deletions
This file was deleted.
Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
name: "🪲 Show me the Bug"
2+
description: Report a bug encountered while using OpenManus and seek assistance.
3+
labels: bug
4+
body:
5+
- type: textarea
6+
id: bug-description
7+
attributes:
8+
label: Bug Description
9+
description: |
10+
Clearly describe the bug you encountered
11+
validations:
12+
required: true
13+
- type: textarea
14+
id: solve-method
15+
attributes:
16+
label: Bug solved method
17+
description: |
18+
If resolved, explain the solution. Optionally, include a Pull Request URL.
19+
If unresolved, provide additional details to aid the investigation.
20+
validations:
21+
required: true
22+
- type: textarea
23+
id: environment-information
24+
attributes:
25+
label: Environment information
26+
description: |
27+
System: e.g., Ubuntu 22.04
28+
Python: e.g., 3.12
29+
OpenManus version: e.g., 0.1.0
30+
value: |
31+
- System version:
32+
- Python version:
33+
- OpenManus version or branch:
34+
- Installation method (e.g., `pip install -r requirements.txt` or `pip install -e .`):
35+
validations:
36+
required: true
37+
- type: textarea
38+
id: extra-information
39+
attributes:
40+
label: Extra information
41+
description: |
42+
For example, attach screenshots or logs to help diagnose the issue.
43+
validations:
44+
required: false

.github/workflows/pr-autodiff.yaml

Lines changed: 22 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -15,21 +15,20 @@ jobs:
1515
(github.event_name == 'pull_request') ||
1616
(github.event_name == 'issue_comment' &&
1717
contains(github.event.comment.body, '!pr-diff') &&
18-
(github.event.comment.author_association == 'COLLABORATOR' || github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'OWNER') &&
18+
(github.event.comment.author_association == 'CONTRIBUTOR' || github.event.comment.author_association == 'COLLABORATOR' || github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'OWNER') &&
1919
github.event.issue.pull_request)
2020
steps:
2121
- name: Get PR head SHA
2222
id: get-pr-sha
2323
run: |
24-
if [ "${{ github.event_name }}" == "pull_request" ]; then
25-
echo "pr_sha=${{ github.event.pull_request.head.sha }}" >> $GITHUB_OUTPUT
26-
echo "Retrieved PR head SHA: ${{ github.event.pull_request.head.sha }}"
27-
else
28-
PR_URL="${{ github.event.issue.pull_request.url }}"
29-
SHA=$(curl -s -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" $PR_URL | jq -r '.head.sha')
30-
echo "pr_sha=$SHA" >> $GITHUB_OUTPUT
31-
echo "Retrieved PR head SHA from API: $SHA"
32-
fi
24+
PR_URL="${{ github.event.issue.pull_request.url || github.event.pull_request.url }}"
25+
# https://api.github.com/repos/OpenManus/pulls/1
26+
RESPONSE=$(curl -s -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" $PR_URL)
27+
SHA=$(echo $RESPONSE | jq -r '.head.sha')
28+
TARGET_BRANCH=$(echo $RESPONSE | jq -r '.base.ref')
29+
echo "pr_sha=$SHA" >> $GITHUB_OUTPUT
30+
echo "target_branch=$TARGET_BRANCH" >> $GITHUB_OUTPUT
31+
echo "Retrieved PR head SHA from API: $SHA, target branch: $TARGET_BRANCH"
3332
- name: Check out code
3433
uses: actions/checkout@v4
3534
with:
@@ -49,6 +48,7 @@ jobs:
4948
OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }}
5049
GH_TOKEN: ${{ github.token }}
5150
PR_NUMBER: ${{ github.event.pull_request.number || github.event.issue.number }}
51+
TARGET_BRANCH: ${{ steps.get-pr-sha.outputs.target_branch }}
5252
run: |-
5353
cat << 'EOF' > /tmp/_workflow_core.py
5454
import os
@@ -59,7 +59,7 @@ jobs:
5959
6060
def get_diff():
6161
result = subprocess.run(
62-
['git', 'diff', 'origin/main...HEAD'],
62+
['git', 'diff', 'origin/' + os.getenv('TARGET_BRANCH') + '...HEAD'],
6363
capture_output=True, text=True, check=True)
6464
return '\n'.join(
6565
line for line in result.stdout.split('\n')
@@ -86,6 +86,17 @@ jobs:
8686
8787
### Spelling/Offensive Content Check
8888
- No spelling mistakes or offensive content found in the code or comments.
89+
90+
## 中文(简体)
91+
- 新增了 `ABC` 类
92+
- `foo` 模块中的 `f()` 行为已修复
93+
94+
### 评论高亮
95+
- `config.toml` 需要正确配置才能确保新功能正常运行。
96+
97+
### 内容检查
98+
- 没有发现代码或注释中的拼写错误或不当措辞。
99+
89100
3. Highlight non-English comments
90101
4. Check for spelling/offensive content'''
91102

app/agent/toolcall.py

Lines changed: 19 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -71,40 +71,42 @@ async def think(self) -> bool:
7171
return False
7272
raise
7373

74-
self.tool_calls = response.tool_calls
74+
self.tool_calls = tool_calls = (
75+
response.tool_calls if response and response.tool_calls else []
76+
)
77+
content = response.content if response and response.content else ""
7578

7679
# Log response info
77-
logger.info(f"✨ {self.name}'s thoughts: {response.content}")
80+
logger.info(f"✨ {self.name}'s thoughts: {content}")
7881
logger.info(
79-
f"🛠️ {self.name} selected {len(response.tool_calls) if response.tool_calls else 0} tools to use"
82+
f"🛠️ {self.name} selected {len(tool_calls) if tool_calls else 0} tools to use"
8083
)
81-
if response.tool_calls:
82-
logger.info(
83-
f"🧰 Tools being prepared: {[call.function.name for call in response.tool_calls]}"
84-
)
84+
if tool_calls:
8585
logger.info(
86-
f"🔧 Tool arguments: {response.tool_calls[0].function.arguments}"
86+
f"🧰 Tools being prepared: {[call.function.name for call in tool_calls]}"
8787
)
88+
logger.info(f"🔧 Tool arguments: {tool_calls[0].function.arguments}")
8889

8990
try:
91+
if response is None:
92+
raise RuntimeError("No response received from the LLM")
93+
9094
# Handle different tool_choices modes
9195
if self.tool_choices == ToolChoice.NONE:
92-
if response.tool_calls:
96+
if tool_calls:
9397
logger.warning(
9498
f"🤔 Hmm, {self.name} tried to use tools when they weren't available!"
9599
)
96-
if response.content:
97-
self.memory.add_message(Message.assistant_message(response.content))
100+
if content:
101+
self.memory.add_message(Message.assistant_message(content))
98102
return True
99103
return False
100104

101105
# Create and add assistant message
102106
assistant_msg = (
103-
Message.from_tool_calls(
104-
content=response.content, tool_calls=self.tool_calls
105-
)
107+
Message.from_tool_calls(content=content, tool_calls=self.tool_calls)
106108
if self.tool_calls
107-
else Message.assistant_message(response.content)
109+
else Message.assistant_message(content)
108110
)
109111
self.memory.add_message(assistant_msg)
110112

@@ -113,7 +115,7 @@ async def think(self) -> bool:
113115

114116
# For 'auto' mode, continue with content if no commands but content exists
115117
if self.tool_choices == ToolChoice.AUTO and not self.tool_calls:
116-
return bool(response.content)
118+
return bool(content)
117119

118120
return bool(self.tool_calls)
119121
except Exception as e:
@@ -209,7 +211,7 @@ async def execute_tool(self, command: ToolCall) -> str:
209211
return f"Error: {error_msg}"
210212
except Exception as e:
211213
error_msg = f"⚠️ Tool '{name}' encountered a problem: {str(e)}"
212-
logger.error(error_msg)
214+
logger.exception(error_msg)
213215
return f"Error: {error_msg}"
214216

215217
async def _handle_special_tool(self, name: str, result: Any, **kwargs):

app/llm.py

Lines changed: 16 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
OpenAIError,
1111
RateLimitError,
1212
)
13+
from openai.types.chat.chat_completion_message import ChatCompletionMessage
1314
from tenacity import (
1415
retry,
1516
retry_if_exception_type,
@@ -421,9 +422,9 @@ async def ask(
421422

422423
if not stream:
423424
# Non-streaming request
424-
params["stream"] = False
425-
426-
response = await self.client.chat.completions.create(**params)
425+
response = await self.client.chat.completions.create(
426+
**params, stream=False
427+
)
427428

428429
if not response.choices or not response.choices[0].message.content:
429430
raise ValueError("Empty or invalid response from LLM")
@@ -438,8 +439,7 @@ async def ask(
438439
# Streaming request, For streaming, update estimated token count before making the request
439440
self.update_token_count(input_tokens)
440441

441-
params["stream"] = True
442-
response = await self.client.chat.completions.create(**params)
442+
response = await self.client.chat.completions.create(**params, stream=True)
443443

444444
collected_messages = []
445445
completion_text = ""
@@ -466,20 +466,20 @@ async def ask(
466466
except TokenLimitExceeded:
467467
# Re-raise token limit errors without logging
468468
raise
469-
except ValueError as ve:
470-
logger.error(f"Validation error: {ve}")
469+
except ValueError:
470+
logger.exception(f"Validation error")
471471
raise
472472
except OpenAIError as oe:
473-
logger.error(f"OpenAI API error: {oe}")
473+
logger.exception(f"OpenAI API error")
474474
if isinstance(oe, AuthenticationError):
475475
logger.error("Authentication failed. Check API key.")
476476
elif isinstance(oe, RateLimitError):
477477
logger.error("Rate limit exceeded. Consider increasing retry attempts.")
478478
elif isinstance(oe, APIError):
479479
logger.error(f"API error: {oe}")
480480
raise
481-
except Exception as e:
482-
logger.error(f"Unexpected error in ask: {e}")
481+
except Exception:
482+
logger.exception(f"Unexpected error in ask")
483483
raise
484484

485485
@retry(
@@ -654,7 +654,7 @@ async def ask_tool(
654654
tool_choice: TOOL_CHOICE_TYPE = ToolChoice.AUTO, # type: ignore
655655
temperature: Optional[float] = None,
656656
**kwargs,
657-
):
657+
) -> ChatCompletionMessage | None:
658658
"""
659659
Ask LLM using functions/tools and return the response.
660660
@@ -732,12 +732,15 @@ async def ask_tool(
732732
temperature if temperature is not None else self.temperature
733733
)
734734

735-
response = await self.client.chat.completions.create(**params)
735+
response: ChatCompletion = await self.client.chat.completions.create(
736+
**params, stream=False
737+
)
736738

737739
# Check if response is valid
738740
if not response.choices or not response.choices[0].message:
739741
print(response)
740-
raise ValueError("Invalid or empty response from LLM")
742+
# raise ValueError("Invalid or empty response from LLM")
743+
return None
741744

742745
# Update token counts
743746
self.update_token_count(

app/tool/file_operators.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -42,17 +42,19 @@ async def run_command(
4242
class LocalFileOperator(FileOperator):
4343
"""File operations implementation for local filesystem."""
4444

45+
encoding: str = "utf-8"
46+
4547
async def read_file(self, path: PathLike) -> str:
4648
"""Read content from a local file."""
4749
try:
48-
return Path(path).read_text()
50+
return Path(path).read_text(encoding=self.encoding)
4951
except Exception as e:
5052
raise ToolError(f"Failed to read {path}: {str(e)}") from None
5153

5254
async def write_file(self, path: PathLike, content: str) -> None:
5355
"""Write content to a local file."""
5456
try:
55-
Path(path).write_text(content)
57+
Path(path).write_text(content, encoding=self.encoding)
5658
except Exception as e:
5759
raise ToolError(f"Failed to write to {path}: {str(e)}") from None
5860

0 commit comments

Comments
 (0)