
swarm handoff not able to handle multiple questions #105

@SparrowSolutions

Description

Hi,

I want to create a swarm that hands off between agents to build up the complete answer to a user query.
Below are the code I am running and the error I am getting; I would appreciate help resolving the issue:

from langgraph.checkpoint.memory import InMemorySaver
from langgraph.prebuilt import create_react_agent
from langgraph_swarm import create_handoff_tool, create_swarm
from langchain_openai import AzureChatOpenAI
from langchain_core.runnables import ConfigurableField
from dotenv import load_dotenv
import os

load_dotenv()

model = AzureChatOpenAI(
            openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],
            azure_deployment=os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"],
            temperature=0.00,
            verbose=True,
            max_tokens=None
            ).configurable_fields(
                temperature=ConfigurableField(
                                id="llm_temperature",
                                name="LLM Temperature",
                                description="The temperature of the LLM",
                                )
                )
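
# The ConfigurableField above lets callers override the temperature per run,
# e.g. model.with_config(configurable={"llm_temperature": 0.7}).invoke("hi")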


def add(a: int, b: int) -> int:
    """Add two numbers"""
    return a + b

def subtract(a: int, b: int) -> int:
    """Subtract two numbers"""
    return a - b

alice = create_react_agent(
    model,
    [add,
     create_handoff_tool(agent_name="Bob", description="Transfer to Bob, he can help answering in pirate style."),
     create_handoff_tool(agent_name="Hansel", description="Transfer to Hansel, she can help with subtraction.")],
    prompt="You are Alice, an addition expert. Check the last user query, acknowledge the already answered portion, answer only the addition part, and hand off to another agent for the rest of the query. Do not call more than one tool at a time.",
    name="Alice",
)

hansel = create_react_agent(
    model,
    [subtract,
     create_handoff_tool(agent_name="Alice", description="Transfer to Alice, she can help with addition."),
     create_handoff_tool(agent_name="Bob", description="Transfer to Bob, he can help answering in pirate style.")],
    prompt="You are Hansel, a subtraction expert. Check the last user query, acknowledge the already answered portion, answer only the subtraction part, and hand off to another agent for the rest of the query. Do not call more than one tool at a time.",
    name="Hansel",
)

bob = create_react_agent(
    model,
    [create_handoff_tool(agent_name="Alice", description="Transfer to Alice, she can help with addition."),
     create_handoff_tool(agent_name="Hansel", description="Transfer to Hansel, she can help with subtraction.")],
    prompt="You are Bob, you speak like a pirate. Check the last user query, acknowledge the already answered portion, answer only the parts that are neither addition nor subtraction, and hand off to another agent for the rest of the query. Do not call more than one tool at a time.",
    name="Bob",
)

checkpointer = InMemorySaver()
workflow = create_swarm(
    [alice, bob, hansel],
    default_active_agent="Bob"
)
app = workflow.compile(checkpointer=checkpointer)

config = {"configurable": {"thread_id": "1"}}
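
# The thread_id keys the InMemorySaver checkpoints, so repeated invoke() calls
# with the same config continue the same conversation.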


turn_2 = app.invoke(
    {"messages": [{"role": "user", "content": "can you calculate 2+6-4?"}]},
    config,
)
turn_2["messages"][-1].pretty_print()  # pretty_print() prints and returns None, so don't wrap it in print()

Error :

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Cell In[2], line 1
----> 1 turn_2 = app.invoke(
      2     {"messages": [{"role": "user", "content": "can you calculate 2+6-4?"}]},
      3     config,
      4 )
      5 print(turn_2["messages"][-1].pretty_print())

File c:\01MyFolder\python-31210\py-repo\dev\agent_framework\Lib\site-packages\langgraph\pregel\main.py:3085, in Pregel.invoke(self, input, config, context, stream_mode, print_mode, output_keys, interrupt_before, interrupt_after, durability, **kwargs)
   3082 chunks: list[dict[str, Any] | Any] = []
   3083 interrupts: list[Interrupt] = []
-> 3085 for chunk in self.stream(
   3086     input,
   3087     config,
   3088     context=context,
   3089     stream_mode=["updates", "values"]
   3090     if stream_mode == "values"
   3091     else stream_mode,
   3092     print_mode=print_mode,
   3093     output_keys=output_keys,
   3094     interrupt_before=interrupt_before,
   3095     interrupt_after=interrupt_after,
   3096     durability=durability,
   3097     **kwargs,
   3098 ):
   3099     if stream_mode == "values":
   3100         if len(chunk) == 2:

File c:\01MyFolder\python-31210\py-repo\dev\agent_framework\Lib\site-packages\langgraph\pregel\main.py:2674, in Pregel.stream(self, input, config, context, stream_mode, print_mode, output_keys, interrupt_before, interrupt_after, durability, subgraphs, debug, **kwargs)
   2672 for task in loop.match_cached_writes():
   2673     loop.output_writes(task.id, task.writes, cached=True)
-> 2674 for _ in runner.tick(
   2675     [t for t in loop.tasks.values() if not t.writes],
   2676     timeout=self.step_timeout,
   2677     get_waiter=get_waiter,
   2678     schedule_task=loop.accept_push,
   2679 ):
   2680     # emit output
   2681     yield from _output(
   2682         stream_mode, print_mode, subgraphs, stream.get, queue.Empty
   2683     )
   2684 loop.after_tick()

File c:\01MyFolder\python-31210\py-repo\dev\agent_framework\Lib\site-packages\langgraph\pregel\_runner.py:162, in PregelRunner.tick(self, tasks, reraise, timeout, retry_policy, get_waiter, schedule_task)
    160 t = tasks[0]
    161 try:
--> 162     run_with_retry(
    163         t,
    164         retry_policy,
    165         configurable={
    166             CONFIG_KEY_CALL: partial(
    167                 _call,
    168                 weakref.ref(t),
    169                 retry_policy=retry_policy,
    170                 futures=weakref.ref(futures),
    171                 schedule_task=schedule_task,
    172                 submit=self.submit,
    173             ),
    174         },
    175     )
    176     self.commit(t, None)
    177 except Exception as exc:

File c:\01MyFolder\python-31210\py-repo\dev\agent_framework\Lib\site-packages\langgraph\pregel\_retry.py:42, in run_with_retry(task, retry_policy, configurable)
     40     task.writes.clear()
     41     # run the task
---> 42     return task.proc.invoke(task.input, config)
     43 except ParentCommand as exc:
     44     ns: str = config[CONF][CONFIG_KEY_CHECKPOINT_NS]

File c:\01MyFolder\python-31210\py-repo\dev\agent_framework\Lib\site-packages\langgraph\_internal\_runnable.py:657, in RunnableSeq.invoke(self, input, config, **kwargs)
    655     # run in context
    656     with set_config_context(config, run) as context:
--> 657         input = context.run(step.invoke, input, config, **kwargs)
    658 else:
    659     input = step.invoke(input, config)

File c:\01MyFolder\python-31210\py-repo\dev\agent_framework\Lib\site-packages\langgraph\pregel\main.py:3085, in Pregel.invoke(self, input, config, context, stream_mode, print_mode, output_keys, interrupt_before, interrupt_after, durability, **kwargs)
   3082 chunks: list[dict[str, Any] | Any] = []
   3083 interrupts: list[Interrupt] = []
-> 3085 for chunk in self.stream(
   3086     input,
   3087     config,
   3088     context=context,
   3089     stream_mode=["updates", "values"]
   3090     if stream_mode == "values"
   3091     else stream_mode,
   3092     print_mode=print_mode,
   3093     output_keys=output_keys,
   3094     interrupt_before=interrupt_before,
   3095     interrupt_after=interrupt_after,
   3096     durability=durability,
   3097     **kwargs,
   3098 ):
   3099     if stream_mode == "values":
   3100         if len(chunk) == 2:

File c:\01MyFolder\python-31210\py-repo\dev\agent_framework\Lib\site-packages\langgraph\pregel\main.py:2674, in Pregel.stream(self, input, config, context, stream_mode, print_mode, output_keys, interrupt_before, interrupt_after, durability, subgraphs, debug, **kwargs)
   2672 for task in loop.match_cached_writes():
   2673     loop.output_writes(task.id, task.writes, cached=True)
-> 2674 for _ in runner.tick(
   2675     [t for t in loop.tasks.values() if not t.writes],
   2676     timeout=self.step_timeout,
   2677     get_waiter=get_waiter,
   2678     schedule_task=loop.accept_push,
   2679 ):
   2680     # emit output
   2681     yield from _output(
   2682         stream_mode, print_mode, subgraphs, stream.get, queue.Empty
   2683     )
   2684 loop.after_tick()

File c:\01MyFolder\python-31210\py-repo\dev\agent_framework\Lib\site-packages\langgraph\pregel\_runner.py:162, in PregelRunner.tick(self, tasks, reraise, timeout, retry_policy, get_waiter, schedule_task)
    160 t = tasks[0]
    161 try:
--> 162     run_with_retry(
    163         t,
    164         retry_policy,
    165         configurable={
    166             CONFIG_KEY_CALL: partial(
    167                 _call,
    168                 weakref.ref(t),
    169                 retry_policy=retry_policy,
    170                 futures=weakref.ref(futures),
    171                 schedule_task=schedule_task,
    172                 submit=self.submit,
    173             ),
    174         },
    175     )
    176     self.commit(t, None)
    177 except Exception as exc:

File c:\01MyFolder\python-31210\py-repo\dev\agent_framework\Lib\site-packages\langgraph\pregel\_retry.py:42, in run_with_retry(task, retry_policy, configurable)
     40     task.writes.clear()
     41     # run the task
---> 42     return task.proc.invoke(task.input, config)
     43 except ParentCommand as exc:
     44     ns: str = config[CONF][CONFIG_KEY_CHECKPOINT_NS]

File c:\01MyFolder\python-31210\py-repo\dev\agent_framework\Lib\site-packages\langgraph\_internal\_runnable.py:657, in RunnableSeq.invoke(self, input, config, **kwargs)
    655     # run in context
    656     with set_config_context(config, run) as context:
--> 657         input = context.run(step.invoke, input, config, **kwargs)
    658 else:
    659     input = step.invoke(input, config)

File c:\01MyFolder\python-31210\py-repo\dev\agent_framework\Lib\site-packages\langgraph\_internal\_runnable.py:394, in RunnableCallable.invoke(self, input, config, **kwargs)
    392     # run in context
    393     with set_config_context(child_config, run) as context:
--> 394         ret = context.run(self.func, *args, **kwargs)
    395 except BaseException as e:
    396     run_manager.on_chain_error(e)

File c:\01MyFolder\python-31210\py-repo\dev\agent_framework\Lib\site-packages\langgraph\prebuilt\chat_agent_executor.py:620, in create_react_agent.<locals>.call_model(state, runtime, config)
    613     msg = (
    614         "Async model callable provided but agent invoked synchronously. "
    615         "Use agent.ainvoke() or agent.astream(), or "
    616         "provide a sync model callable."
    617     )
    618     raise RuntimeError(msg)
--> 620 model_input = _get_model_input_state(state)
    622 if is_dynamic_model:
    623     # Resolve dynamic model at runtime and apply prompt
    624     dynamic_model = _resolve_model(state, runtime)

File c:\01MyFolder\python-31210\py-repo\dev\agent_framework\Lib\site-packages\langgraph\prebuilt\chat_agent_executor.py:599, in create_react_agent.<locals>._get_model_input_state(state)
    596 if messages is None:
    597     raise ValueError(error_msg)
--> 599 _validate_chat_history(messages)
    600 # we're passing messages under `messages` key, as this is expected by the prompt
    601 if isinstance(state_schema, type) and issubclass(state_schema, BaseModel):

File c:\01MyFolder\python-31210\py-repo\dev\agent_framework\Lib\site-packages\langgraph\prebuilt\chat_agent_executor.py:245, in _validate_chat_history(messages)
    236     return
    238 error_message = create_error_message(
    239     message="Found AIMessages with tool_calls that do not have a corresponding ToolMessage. "
    240     f"Here are the first few of those tool calls: {tool_calls_without_results[:3]}.\n\n"
   (...)    243     error_code=ErrorCode.INVALID_CHAT_HISTORY,
    244 )
--> 245 raise ValueError(error_message)

ValueError: Found AIMessages with tool_calls that do not have a corresponding ToolMessage. Here are the first few of those tool calls: [{'name': 'transfer_to_hansel', 'args': {}, 'id': 'call_6V2Ijs8KRkgPZjyflWtViuA4', 'type': 'tool_call'}].

Every tool call (LLM requesting to call a tool) in the message history MUST have a corresponding ToolMessage (result of a tool invocation to return to the LLM) - this is required by most LLM providers.
For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/INVALID_CHAT_HISTORY
During task with name 'agent' and id '58ef3fc6-0250-910f-03d0-c89147ed0647'
During task with name 'Alice' and id '48bdb416-82a7-1945-9132-a82a11be748a'
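
What I suspect is happening: the model emits a handoff tool call (e.g. transfer_to_hansel) in parallel with another tool call in the same AI message, and the handoff routes to the next agent before the remaining tool call gets its ToolMessage, which trips this validation. A minimal sketch of one commonly suggested mitigation, enforcing a single tool call per turn via bind_tools rather than relying on the prompt instruction alone (this assumes the configurable model wrapper forwards bind_tools and that the Azure deployment supports the parallel_tool_calls flag; tool list shown for Alice only, prompt abbreviated):

alice_tools = [
    add,
    create_handoff_tool(agent_name="Bob", description="Transfer to Bob, he can help answering in pirate style."),
    create_handoff_tool(agent_name="Hansel", description="Transfer to Hansel, she can help with subtraction."),
]

# Pre-bind the tools with parallel tool calls disabled, then pass the bound
# model to create_react_agent alongside the same tool list.
alice = create_react_agent(
    model.bind_tools(alice_tools, parallel_tool_calls=False),
    alice_tools,
    prompt="You are Alice, an addition expert. ...",  # same prompt as above
    name="Alice",
)

The same change would be applied to hansel and bob with their respective tool lists.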
