Skip to content

ApplicationContext JSON Serialization Error #755

@Tom-roujiang

Description

@Tom-roujiang

I encountered a JSON serialization error related to ApplicationContext when running the example code to create an example agent. The error occurs in the _save_trajectories method when attempting to serialize task trajectories, and appears to stem from the aworld.utils.serialized_util.to_serializable function not properly handling ApplicationContext objects.

Error Log

  Failed to get trajectories: Object of type ApplicationContext is not JSON serializable.
  Traceback (most recent call last):
  File "/Users/liyun/Downloads/AWorld-main/aworld/runners/event_runner.py", line 444, in _save_trajectories
    "trajectory": json.dumps(self._task_response.trajectory, ensure_ascii=False),
                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/liyun/miniconda3/lib/python3.11/json/__init__.py", line 238, in dumps
    **kw).encode(obj)
          ^^^^^^^^^^^
  File "/Users/liyun/miniconda3/lib/python3.11/json/encoder.py", line 200, in encode
    chunks = self.iterencode(o, _one_shot=True)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/liyun/miniconda3/lib/python3.11/json/encoder.py", line 258, in iterencode
    return _iterencode(o, 0)
           ^^^^^^^^^^^^^^^^^
  File "/Users/liyun/miniconda3/lib/python3.11/json/encoder.py", line 180, in default
    raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type ApplicationContext is not JSON serializable 

My code

from aworld.agents.llm_agent import Agent
from aworld.config.conf import AgentConfig, ModelConfig
from aworld.core.tool.func_to_tool import be_tool
from aworld.runner import Runners
from aworld.core.task import Task
from aworld.config.conf import AgentMemoryConfig
from aworld.core.memory import LongTermConfig
from aworld.core.context.amni.config import init_middlewares, AmniConfigFactory, AmniConfigLevel
from aworld.memory.main import MemoryFactory, InMemoryMemoryStore
from aworld.memory.models import AgentExperience
import asyncio
import os

# ---------------------- Step 1: Configure the LLM (prefer environment variables; safe and easy to deploy) ----------------------
# Use setdefault so values already exported in the shell take priority, as the
# original comment intends; plain `os.environ[...] =` assignment would clobber
# a real API key the user set before running this script.
os.environ.setdefault("LLM_PROVIDER", "openai")
os.environ.setdefault("LLM_MODEL_NAME", "gpt-4")
os.environ.setdefault("LLM_API_KEY", "")  # empty default: user must supply a real key
os.environ.setdefault("LLM_BASE_URL", "https://api.openai.com/v1")

# Build a shared ModelConfig (new agents added later can reuse this config directly).
# All values are read from the environment so deployments can override them.
model_config = ModelConfig(
    llm_provider=os.getenv("LLM_PROVIDER", "openai"),
    llm_model_name=os.getenv("LLM_MODEL_NAME"),   # may be None if the env var is unset
    llm_base_url=os.getenv("LLM_BASE_URL"),
    llm_api_key=os.getenv("LLM_API_KEY"),
)


# Initialize the Amni context middlewares (from aworld.core.context.amni.config);
# done once, before any agent is constructed.
init_middlewares()

# Build the agent-specific configuration, including memory behavior.
agent_memory_config = AgentMemoryConfig(
    enable_summary=True,            # summarize conversation history
    summary_rounds=5,               # summarize every 5 rounds
    summary_context_length=8000,
    history_rounds=20,              # rounds of raw history to keep
    enable_long_term=True,          # persist memory beyond the session
    long_term_config=LongTermConfig.create_simple_config(
        application_id="aworld-agent-example",
        enable_user_profiles=True,
        enable_agent_experiences=True
    )
)

agent_config = AgentConfig(
    llm_config=model_config,        # shared LLM settings built above
    memory_config=agent_memory_config,
)

# ---------------------- Step 2: Define local tools (the agent's core capability extensions) ----------------------
# Tool 1: basic greeting tool (supports a custom user name).
@be_tool(tool_name="custom_greeting_tool",tool_desc="A tool to generate a warm greeting message, accepts a user name parameter to personalize the greeting.")
def custom_greeting_tool(user_name: str = "User") -> str:
    """Build a personalized greeting.

    :param user_name: optional name to greet; defaults to "User"
    :return: the greeting string
    """
    greeting = f"Hello {user_name}! Nice to meet you, I'm your intelligent text processing assistant."
    return greeting

# Tool 2: text summarization tool (distills the core information of a text).
@be_tool(tool_name="text_summarization_tool",tool_desc="A tool to summarize long text, extract the core information and key points, return a concise summary.")
def text_summarization_tool(text: str) -> str:
    """Wrap *text* in a summarization prompt.

    The tool only builds the prompt; the agent's LLM produces the actual
    summary from it.

    :param text: long text to be summarized
    :return: the summarization prompt string
    """
    return f"Please summarize the following text concisely, within 100 words: \n\n{text}"

# Tool 3: Chinese-English translation tool (zh -> en and en -> zh).
@be_tool(tool_name="cn_en_translate_tool",tool_desc="A tool for Chinese-English mutual translation, accepts text and target language parameters, target language options: 'zh' or 'en'.")
def cn_en_translate_tool(text: str, target_language: str = "en") -> str:
    """Build a translation prompt for *text*.

    :param text: text to translate
    :param target_language: target language, "zh" or "en" (default "en");
        any other value yields an error message instead of a prompt
    :return: the prompt string the agent's LLM completes into a translation
    """
    if target_language in ("zh", "en"):
        return f"Please translate the following text into {target_language}: \n\n{text}"
    return "Error: Target language only supports 'zh' (Chinese) or 'en' (English)."

# ---------------------- Step 3: Set the system prompt (defines the agent's role and behavior) ----------------------
# NOTE: the prompt text below is runtime data sent to the LLM; the tool names
# it mentions must stay in sync with the tools registered above.
system_prompt = """
You are an intelligent text processing assistant, named 'TextProAgent'.
Your core capabilities are provided by three tools, and you must use the corresponding tools to complete user tasks:
1. Use custom_greeting_tool when the user needs a greeting or welcome message.
2. Use text_summarization_tool when the user needs to summarize long text, extract key points.
3. Use cn_en_translate_tool when the user needs Chinese-English mutual translation.

Rules to follow:
- Be polite and clear in your response.
- When using tools, follow the tool's parameter requirements.
- If the user's request is not related to your capabilities, politely remind the user of your core functions.
"""

# ---------------------- Step 4: Instantiate the agent (wire together all configuration and tools) ----------------------
text_pro_agent = Agent(
    name="TextProAgent",  # the agent's unique name
    conf=agent_config,    # LLM and agent parameters configured earlier
    system_prompt=system_prompt,  # system prompt defining the agent's role
    tool_names=[          # local tools this agent may call (must match the tool names registered above)
        "custom_greeting_tool",
        "text_summarization_tool",
        "cn_en_translate_tool"
    ]
)

# ---------------------- Step 5: Sanity check (optional; confirms the agent was created) ----------------------
print("智能文本处理Agent创建成功!")
print(f"Agent名称:{text_pro_agent.name}")
print(f"已加载工具数量:{len(text_pro_agent.tool_names)}")



# ---------------------- Step 6: Execute concrete tasks through the Runners entry point ----------------------
RUN_AGENT_TASKS = True  # set to False when no model connection is available
def run_task(task_input: str, user_id: str = "user_001", session_id: str = "session_001") -> str:
    """Run one task through the runtime entry point so context/memory/trajectory features take effect."""
    cfg = AmniConfigFactory.create(level=AmniConfigLevel.PILOT)
    task = Task(
        input=task_input,
        agent=text_pro_agent,
        user_id=user_id,
        session_id=session_id,
        context_config=cfg,
    )
    responses = Runners.sync_run_task(task=task)
    return responses.get(task.id).answer

# Drive the agent through sample tasks (skipped when RUN_AGENT_TASKS is False).
if RUN_AGENT_TASKS:
    print("\n" + "="*50)
    print("开始调用 Agent 执行任务...")
    print("="*50)

    # Task 1: exercise the personalized greeting tool (passes a user name).
    print("\n【任务1:个性化问候】")
    greeting_task = "请给我一个问候,我的名字叫小李"
    greeting_result = run_task(greeting_task)
    print("Agent 回复:", greeting_result)

Metadata

Metadata

Assignees

No one assigned

    Labels

    No labels

    Type

    No type

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions