camel/agents/chat_agent.py (47 changes: 35 additions & 12 deletions)
@@ -1083,12 +1083,24 @@ def _calculate_next_summary_threshold(self) -> int:
         return threshold
 
     def _update_memory_with_summary(
-        self, summary: str, include_summaries: bool = False
+        self,
+        summary: str,
+        include_summaries: bool,
+        current_user_input: BaseMessage,
     ) -> None:
         r"""Update memory with summary result.
 
         This method handles memory clearing and restoration of summaries based
         on whether it's a progressive or full compression.
+
+        Args:
+            summary (str): The summary content to add to memory.
+            include_summaries (bool): Whether previous summaries were included
+                in the summarization.
+            current_user_input (BaseMessage): The current step's user input
+                message to preserve after summarization. This message will be
+                re-added to memory after clearing, ensuring the agent retains
+                the original instruction.
         """
 
         summary_content: str = summary
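Note on the signature change: `include_summaries` loses its `= False` default, so every caller must now pass it explicitly alongside the new `current_user_input` parameter. A minimal sketch of the intended call shape, assuming a `ChatAgent` constructed with CAMEL's defaults and that `BaseMessage.make_user_message` mirrors the `make_assistant_message` factory visible in this diff; the summary text and user message are illustrative:

```python
from camel.agents import ChatAgent
from camel.messages import BaseMessage

agent = ChatAgent("You are a helpful assistant.")  # assumes default model config

user_msg = BaseMessage.make_user_message(
    role_name="user",
    content="Summarize the quarterly report and draft the follow-up email.",
)
# Internal call, shown for illustration only: the current user input is
# threaded through so it survives the memory clear.
agent._update_memory_with_summary(
    summary="The user asked for a report summary; figures gathered so far...",
    include_summaries=False,
    current_user_input=user_msg,
)
```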
@@ -1121,16 +1133,19 @@ def _update_memory_with_summary(
             role_name="assistant", content=summary_content
         )
         self.update_memory(new_summary_msg, OpenAIBackendRole.ASSISTANT)
-        input_message = BaseMessage.make_assistant_message(
-            role_name="assistant",
-            content=(
-                "Please continue the conversation from "
-                "where we left it off without asking the user any further "
-                "questions. Continue with the last task that you were "
-                "asked to work on."
-            ),
+
+        # Re-add current user input with context hint
+        enhanced_content = (
+            "[CONTEXT_SUMMARY] The context above is a summary of our "
+            "previous conversation. Continue executing the original task "
+            "below.\n\n"
+            f"{current_user_input.content}"
         )
-        self.update_memory(input_message, OpenAIBackendRole.ASSISTANT)
+        enhanced_user_input = current_user_input.create_new_instance(
+            enhanced_content
+        )
+        self.update_memory(enhanced_user_input, OpenAIBackendRole.USER)
 
         # Update token count
         try:
             summary_tokens = (
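The behavioral core of the PR is in the hunk above: instead of appending a generic assistant-role nudge ("Please continue the conversation..."), the method now re-injects the user's own message under the USER role, prefixed with a `[CONTEXT_SUMMARY]` hint. A standalone sketch of the resulting message content, with the prefix copied verbatim from the diff and the example instruction invented:

```python
def build_enhanced_user_content(original_user_content: str) -> str:
    # Mirrors the enhanced_content construction in the hunk above.
    return (
        "[CONTEXT_SUMMARY] The context above is a summary of our "
        "previous conversation. Continue executing the original task "
        "below.\n\n"
        f"{original_user_content}"
    )

# After summarization, memory holds (in order):
#   1. assistant message: the summary text
#   2. user message: the context hint plus the original instruction
print(build_enhanced_user_content("Refactor the parser to support UTF-8."))
```

Re-adding the message under `OpenAIBackendRole.USER` rather than `ASSISTANT`, as the old code did, also keeps the role structure consistent with how the model originally received the instruction.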
@@ -2807,6 +2822,7 @@ def _step_impl(
                 self._update_memory_with_summary(
                     summary.get("summary", ""),
                     include_summaries=True,
+                    current_user_input=input_message,
                 )
             elif num_tokens > threshold:
                 logger.info(
@@ -2818,6 +2834,7 @@
                 self._update_memory_with_summary(
                     summary.get("summary", ""),
                     include_summaries=False,
+                    current_user_input=input_message,
                 )
                 accumulated_context_tokens += num_tokens
         except RuntimeError as e:
@@ -2897,7 +2914,9 @@ def _step_impl(
                 summary_messages += "\n\n" + tool_notice
 
             self._update_memory_with_summary(
-                summary_messages, include_summaries=False
+                summary_messages,
+                include_summaries=False,
+                current_user_input=input_message,
             )
             self._last_token_limit_tool_signature = tool_signature
             return self._step_impl(input_message, response_format)
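All the sync call sites updated above follow the same trigger pattern: compare the token count against a threshold, summarize, then rebuild memory while threading the step's `input_message` through. A self-contained toy illustrating that flow; the agent and summarizer here are stand-ins, not the real `ChatAgent` internals:

```python
from typing import Dict, List

class ToyAgent:
    """Stand-in for ChatAgent; models only the summarization trigger."""

    def __init__(self, threshold: int) -> None:
        self.threshold = threshold
        self.memory: List[str] = []

    def summarize(self, include_summaries: bool) -> Dict[str, str]:
        # Stand-in summarizer; the real one compresses the message history.
        return {"summary": "condensed history"}

    def _update_memory_with_summary(
        self,
        summary: str,
        include_summaries: bool,
        current_user_input: str,
    ) -> None:
        # Clear memory, then restore the summary and the user's message.
        self.memory = [summary, current_user_input]

    def step(self, input_message: str, num_tokens: int) -> None:
        if num_tokens > self.threshold:
            summary = self.summarize(include_summaries=False)
            self._update_memory_with_summary(
                summary.get("summary", ""),
                include_summaries=False,
                current_user_input=input_message,  # new in this PR
            )

agent = ToyAgent(threshold=4096)
agent.step("Finish the migration script.", num_tokens=5000)
print(agent.memory)  # ['condensed history', 'Finish the migration script.']
```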
@@ -3130,6 +3149,7 @@ async def _astep_non_streaming_task(
                 self._update_memory_with_summary(
                     summary.get("summary", ""),
                     include_summaries=True,
+                    current_user_input=input_message,
                 )
             elif num_tokens > threshold:
                 logger.info(
@@ -3143,6 +3163,7 @@
                 self._update_memory_with_summary(
                     summary.get("summary", ""),
                     include_summaries=False,
+                    current_user_input=input_message,
                 )
                 accumulated_context_tokens += num_tokens
         except RuntimeError as e:
@@ -3222,7 +3243,9 @@
             if tool_notice:
                 summary_messages += "\n\n" + tool_notice
             self._update_memory_with_summary(
-                summary_messages, include_summaries=False
+                summary_messages,
+                include_summaries=False,
+                current_user_input=input_message,
             )
             self._last_token_limit_tool_signature = tool_signature
             return await self._astep_non_streaming_task(
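The async call sites mirror the sync ones exactly, including the recursive retry after compression. A minimal sketch of that retry shape, reusing the `ToyAgent` stand-in from the sketch above; the halved token count is an invented stand-in for re-counting after compression:

```python
import asyncio

async def astep(agent: "ToyAgent", input_message: str, num_tokens: int) -> str:
    # Mirrors the recursive retry in _astep_non_streaming_task: compress,
    # then re-enter the step with the same input message.
    if num_tokens > agent.threshold:
        agent._update_memory_with_summary(
            "condensed history",
            include_summaries=False,
            current_user_input=input_message,
        )
        return await astep(agent, input_message, num_tokens // 2)
    return "response"

# asyncio.run(astep(ToyAgent(4096), "Finish the migration script.", 5000))
```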