
latest autogen #19

Merged · 18 commits · Apr 24, 2025

Commits
99aac24
Agentchat canvas (#6215)
lspinheiro Apr 21, 2025
1de07ab
Generalize Continuous SystemMessage merging via model_info[“multiple_…
SongChiYoung Apr 21, 2025
4d3e47a
fix: ensure serialized messages are passed to LLMStreamStartEvent (#6…
peterj Apr 21, 2025
4d381d7
DOC: add extentions - autogen-oaiapi and autogen-contextplus (#6338)
SongChiYoung Apr 21, 2025
71363a3
Add experimental notice to canvas (#6349)
ekzhu Apr 21, 2025
9b0a0bd
FEAT: SelectorGroupChat could using stream inner select_prompt (#6286)
SongChiYoung Apr 21, 2025
89d77c7
Add an example using autogen-core and FastAPI to create streaming res…
ToryPan Apr 21, 2025
d051da5
fix: ollama fails when tools use optional args (#6343)
peterj Apr 22, 2025
0015315
Added support for exposing GPUs to docker code executor (#6339)
millerh1 Apr 22, 2025
f00f7d2
Avoid re-registering a message type already registered (#6354)
jorge-wonolo Apr 22, 2025
8a97292
Add azure ai agent (#6191)
abdomohamed Apr 22, 2025
b3f3731
Fix: deserialize model_context in AssistantAgent and SocietyOfMindAge…
SongChiYoung Apr 22, 2025
aad6caa
Add self-debugging loop to `CodeExecutionAgent` (#6306)
Ethan0456 Apr 22, 2025
b6935f9
update website version (#6364)
ekzhu Apr 22, 2025
a283d26
TEST/change gpt4, gpt4o serise to gpt4.1nano (#6375)
SongChiYoung Apr 23, 2025
8fcba01
Introduce workbench (#6340)
ekzhu Apr 24, 2025
f059262
Remove `name` field from OpenAI Assistant Message (#6388)
ekzhu Apr 24, 2025
770de74
Merge remote-tracking branch 'upstream/main' into peterj/autogenupdat…
peterj Apr 24, 2025
1 change: 1 addition & 0 deletions .github/ISSUE_TEMPLATE/1-bug_report.yml
@@ -90,6 +90,7 @@ body:
multiple: false
options:
- "Python dev (main branch)"
- "Python 0.5.4"
- "Python 0.5.3"
- "Python 0.5.2"
- "Python 0.5.1"
3 changes: 2 additions & 1 deletion .github/workflows/docs.yml
@@ -33,7 +33,7 @@ jobs:
[
# For main use the workflow target
{ ref: "${{github.ref}}", dest-dir: dev, uv-version: "0.5.13", sphinx-release-override: "dev" },
{ ref: "python-v0.5.3", dest-dir: stable, uv-version: "0.5.13", sphinx-release-override: "stable" },
{ ref: "python-v0.5.4", dest-dir: stable, uv-version: "0.5.13", sphinx-release-override: "stable" },
{ ref: "v0.4.0.post1", dest-dir: "0.4.0", uv-version: "0.5.13", sphinx-release-override: "" },
{ ref: "v0.4.1", dest-dir: "0.4.1", uv-version: "0.5.13", sphinx-release-override: "" },
{ ref: "v0.4.2", dest-dir: "0.4.2", uv-version: "0.5.13", sphinx-release-override: "" },
@@ -47,6 +47,7 @@ jobs:
{ ref: "python-v0.5.1", dest-dir: "0.5.1", uv-version: "0.5.13", sphinx-release-override: "" },
{ ref: "python-v0.5.2", dest-dir: "0.5.2", uv-version: "0.5.13", sphinx-release-override: "" },
{ ref: "python-v0.5.3", dest-dir: "0.5.3", uv-version: "0.5.13", sphinx-release-override: "" },
{ ref: "python-v0.5.4", dest-dir: "0.5.4", uv-version: "0.5.13", sphinx-release-override: "" },
]
steps:
- name: Checkout
7 changes: 6 additions & 1 deletion docs/switcher.json
@@ -5,11 +5,16 @@
"url": "/autogen/dev/"
},
{
"name": "0.5.3 (stable)",
"name": "0.5.4 (stable)",
"version": "stable",
"url": "/autogen/stable/",
"preferred": true
},
{
"name": "0.5.3",
"version": "0.5.3",
"url": "/autogen/0.5.3/"
},
{
"name": "0.5.2",
"version": "0.5.2",
python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py
@@ -1337,7 +1337,7 @@ def _from_config(cls, config: AssistantAgentConfig) -> Self:
model_client=ChatCompletionClient.load_component(config.model_client),
tools=[BaseTool.load_component(tool) for tool in config.tools] if config.tools else None,
handoffs=config.handoffs,
model_context=None,
model_context=ChatCompletionContext.load_component(config.model_context) if config.model_context else None,
memory=[Memory.load_component(memory) for memory in config.memory] if config.memory else None,
description=config.description,
system_message=config.system_message,
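
A minimal round-trip sketch of what this fix enables, mirroring the new tests added at the bottom of this PR (the ReplayChatCompletionClient stands in for a real model client):

from autogen_agentchat.agents import AssistantAgent
from autogen_core.model_context import BufferedChatCompletionContext
from autogen_ext.models.replay import ReplayChatCompletionClient

# An agent configured with a non-default model context.
agent = AssistantAgent(
    name="assistant",
    model_client=ReplayChatCompletionClient([]),
    model_context=BufferedChatCompletionContext(buffer_size=5),
)

# Serialize, then rebuild the agent from its component config. Before this
# fix, _from_config discarded the saved context and the restored agent fell
# back to the default unbounded context.
restored = AssistantAgent.load_component(agent.dump_component())
assert type(restored.model_context) is BufferedChatCompletionContext
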

Large diffs are not rendered by default.

python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py
@@ -286,6 +286,7 @@ def _to_config(self) -> SocietyOfMindAgentConfig:
description=self.description,
instruction=self._instruction,
response_prompt=self._response_prompt,
model_context=self._model_context.dump_component(),
)

@classmethod
@@ -299,4 +300,5 @@ def _from_config(cls, config: SocietyOfMindAgentConfig) -> Self:
description=config.description or cls.DEFAULT_DESCRIPTION,
instruction=config.instruction or cls.DEFAULT_INSTRUCTION,
response_prompt=config.response_prompt or cls.DEFAULT_RESPONSE_PROMPT,
model_context=ChatCompletionContext.load_component(config.model_context) if config.model_context else None,
)
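
The same round trip now works for SocietyOfMindAgent, whose config previously did not carry the model context at all. A sketch following the pattern of the new tests:

from autogen_agentchat.agents import AssistantAgent, SocietyOfMindAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_core.model_context import HeadAndTailChatCompletionContext
from autogen_ext.models.replay import ReplayChatCompletionClient

inner_team = RoundRobinGroupChat(
    participants=[
        AssistantAgent(name="a1", model_client=ReplayChatCompletionClient([])),
        AssistantAgent(name="a2", model_client=ReplayChatCompletionClient([])),
    ],
)
som = SocietyOfMindAgent(
    name="som",
    team=inner_team,
    model_client=ReplayChatCompletionClient([]),
    model_context=HeadAndTailChatCompletionContext(head_size=3, tail_size=2),
)
restored = SocietyOfMindAgent.load_component(som.dump_component())
assert type(restored.model_context) is HeadAndTailChatCompletionContext
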
python/packages/autogen-agentchat/src/autogen_agentchat/messages.py
@@ -433,22 +433,33 @@ def to_text(self) -> str:


class CodeGenerationEvent(BaseAgentEvent):
"""An event signaling code generation for execution."""
"""An event signaling code generation event."""

retry_attempt: int
"Retry number, 0 means first generation"

content: str
"The complete content as string."

type: Literal["CodeGenerationEvent"] = "CodeGenerationEvent"

code_blocks: List[CodeBlock]
"List of code blocks present in content"

type: Literal["CodeGenerationEvent"] = "CodeGenerationEvent"

def to_text(self) -> str:
return self.content


class CodeExecutionEvent(BaseAgentEvent):
type: Literal["CodeExecutionEvent"] = "CodeExecutionEvent"
"""An event signaling code execution event."""

retry_attempt: int
"Retry number, 0 means first execution"

result: CodeResult
"Code Execution Result"

type: Literal["CodeExecutionEvent"] = "CodeExecutionEvent"

def to_text(self) -> str:
return self.result.output
@@ -531,6 +542,18 @@ def to_text(self) -> str:
return str(self.content)


class SelectorEvent(BaseAgentEvent):
"""An event emitted from the `SelectorGroupChat`."""

content: str
"""The content of the event."""

type: Literal["SelectorEvent"] = "SelectorEvent"

def to_text(self) -> str:
return str(self.content)


class MessageFactory:
""":meta private:

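
The new retry_attempt fields back the self-debugging loop added to `CodeExecutorAgent` (#6306). A hedged sketch of consuming them from an event stream; `watch` and its `stream` argument are illustrative helpers, not part of the API:

from autogen_agentchat.messages import CodeExecutionEvent, CodeGenerationEvent

async def watch(stream):
    # stream: an async iterator of events, e.g. from run_stream(...).
    async for event in stream:
        if isinstance(event, CodeGenerationEvent):
            print(f"generation attempt {event.retry_attempt}: "
                  f"{len(event.code_blocks)} code block(s)")
        elif isinstance(event, CodeExecutionEvent):
            print(f"execution attempt {event.retry_attempt}: "
                  f"{event.result.output[:80]!r}")
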
python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py
@@ -73,7 +73,8 @@ def __init__(
for agent in participants:
for message_type in agent.produced_message_types:
try:
if issubclass(message_type, StructuredMessage):
is_registered = self._message_factory.is_registered(message_type) # type: ignore[reportUnknownArgumentType]
if issubclass(message_type, StructuredMessage) and not is_registered:
self._message_factory.register(message_type) # type: ignore[reportUnknownArgumentType]
except TypeError:
# Not a class or not a valid subclassable type (skip)
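
A sketch of the situation this guard fixes (#6354): a structured message type that is both produced by a participant and declared via custom_message_types used to be registered twice, raising on team construction. `Verdict` is a hypothetical payload type:

from pydantic import BaseModel
from autogen_agentchat.messages import StructuredMessage

class Verdict(BaseModel):
    approved: bool

VerdictMessage = StructuredMessage[Verdict]

# team = RoundRobinGroupChat(
#     participants=[...],                     # agents that produce VerdictMessage
#     custom_message_types=[VerdictMessage],  # previously a duplicate registration
# )                                           # now the second registration is skipped
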
python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py
@@ -5,7 +5,14 @@
from typing import Any, Awaitable, Callable, Dict, List, Mapping, Optional, Sequence, Union, cast

from autogen_core import AgentRuntime, Component, ComponentModel
from autogen_core.models import AssistantMessage, ChatCompletionClient, ModelFamily, SystemMessage, UserMessage
from autogen_core.models import (
AssistantMessage,
ChatCompletionClient,
CreateResult,
ModelFamily,
SystemMessage,
UserMessage,
)
from pydantic import BaseModel
from typing_extensions import Self

@@ -16,6 +23,8 @@
BaseAgentEvent,
BaseChatMessage,
MessageFactory,
ModelClientStreamingChunkEvent,
SelectorEvent,
)
from ...state import SelectorManagerState
from ._base_group_chat import BaseGroupChat
@@ -56,6 +65,7 @@ def __init__(
max_selector_attempts: int,
candidate_func: Optional[CandidateFuncType],
emit_team_events: bool,
model_client_streaming: bool = False,
) -> None:
super().__init__(
name,
@@ -79,6 +89,7 @@ def __init__(
self._max_selector_attempts = max_selector_attempts
self._candidate_func = candidate_func
self._is_candidate_func_async = iscoroutinefunction(self._candidate_func)
self._model_client_streaming = model_client_streaming

async def validate_group_state(self, messages: List[BaseChatMessage] | None) -> None:
pass
@@ -194,7 +205,26 @@ async def _select_speaker(self, roles: str, participants: List[str], history: st
num_attempts = 0
while num_attempts < max_attempts:
num_attempts += 1
response = await self._model_client.create(messages=select_speaker_messages)
if self._model_client_streaming:
chunk: CreateResult | str = ""
async for _chunk in self._model_client.create_stream(messages=select_speaker_messages):
chunk = _chunk
if self._emit_team_events:
if isinstance(chunk, str):
await self._output_message_queue.put(
ModelClientStreamingChunkEvent(content=cast(str, _chunk), source=self._name)
)
else:
assert isinstance(chunk, CreateResult)
assert isinstance(chunk.content, str)
await self._output_message_queue.put(
SelectorEvent(content=chunk.content, source=self._name)
)
# The last chunk must be CreateResult.
assert isinstance(chunk, CreateResult)
response = chunk
else:
response = await self._model_client.create(messages=select_speaker_messages)
assert isinstance(response.content, str)
select_speaker_messages.append(AssistantMessage(content=response.content, source="selector"))
# NOTE: we use all participant names to check for mentions, even if the previous speaker is not allowed.
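
The loop above relies on the model-client streaming contract: create_stream yields incremental str chunks and always ends with a CreateResult carrying the complete response. A condensed sketch of that pattern:

from autogen_core.models import CreateResult

async def last_result(model_client, messages):
    last: CreateResult | str = ""
    async for item in model_client.create_stream(messages=messages):
        if isinstance(item, str):
            pass  # forward the partial text, e.g. as a streaming chunk event
        last = item
    assert isinstance(last, CreateResult)  # the final item is the full result
    return last
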
@@ -281,6 +311,7 @@ class SelectorGroupChatConfig(BaseModel):
# selector_func: ComponentModel | None
max_selector_attempts: int = 3
emit_team_events: bool = False
model_client_streaming: bool = False


class SelectorGroupChat(BaseGroupChat, Component[SelectorGroupChatConfig]):
Expand Down Expand Up @@ -311,6 +342,7 @@ class SelectorGroupChat(BaseGroupChat, Component[SelectorGroupChatConfig]):
selection using model. If the function returns an empty list or `None`, `SelectorGroupChat` will raise a `ValueError`.
This function is only used if `selector_func` is not set. The `allow_repeated_speaker` will be ignored if set.
emit_team_events (bool, optional): Whether to emit team events through :meth:`BaseGroupChat.run_stream`. Defaults to False.
model_client_streaming (bool, optional): Whether to use streaming for the model client. (This is useful for reasoning models like QwQ). Defaults to False.

Raises:
ValueError: If the number of participants is less than two or if the selector prompt is invalid.
@@ -453,6 +485,7 @@ def __init__(
candidate_func: Optional[CandidateFuncType] = None,
custom_message_types: List[type[BaseAgentEvent | BaseChatMessage]] | None = None,
emit_team_events: bool = False,
model_client_streaming: bool = False,
):
super().__init__(
participants,
@@ -473,6 +506,7 @@ def __init__(
self._selector_func = selector_func
self._max_selector_attempts = max_selector_attempts
self._candidate_func = candidate_func
self._model_client_streaming = model_client_streaming

def _create_group_chat_manager_factory(
self,
@@ -505,6 +539,7 @@ def _create_group_chat_manager_factory(
self._max_selector_attempts,
self._candidate_func,
self._emit_team_events,
self._model_client_streaming,
)

def _to_config(self) -> SelectorGroupChatConfig:
@@ -518,6 +553,7 @@ def _to_config(self) -> SelectorGroupChatConfig:
max_selector_attempts=self._max_selector_attempts,
# selector_func=self._selector_func.dump_component() if self._selector_func else None,
emit_team_events=self._emit_team_events,
model_client_streaming=self._model_client_streaming,
)

@classmethod
@@ -536,4 +572,5 @@ def _from_config(cls, config: SelectorGroupChatConfig) -> Self:
# if config.selector_func
# else None,
emit_team_events=config.emit_team_events,
model_client_streaming=config.model_client_streaming,
)
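
A hedged usage sketch for the new flag; agent_a, agent_b, and model_client are placeholders for real participants and a streaming-capable client:

from autogen_agentchat.messages import ModelClientStreamingChunkEvent
from autogen_agentchat.teams import SelectorGroupChat

async def main() -> None:
    team = SelectorGroupChat(
        participants=[agent_a, agent_b],
        model_client=model_client,     # e.g. a reasoning model such as QwQ
        emit_team_events=True,         # required for chunk events to be emitted
        model_client_streaming=True,   # new in this PR
    )
    async for event in team.run_stream(task="Plan the next step."):
        if isinstance(event, ModelClientStreamingChunkEvent):
            print(event.content, end="", flush=True)
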
126 changes: 126 additions & 0 deletions python/packages/autogen-agentchat/tests/test_agent.py
@@ -0,0 +1,126 @@
import pytest
from autogen_agentchat.agents import (
AssistantAgent,
CodeExecutorAgent,
SocietyOfMindAgent,
)
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_core.model_context import (
BufferedChatCompletionContext,
ChatCompletionContext,
HeadAndTailChatCompletionContext,
TokenLimitedChatCompletionContext,
UnboundedChatCompletionContext,
)
from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor
from autogen_ext.models.replay import ReplayChatCompletionClient


@pytest.mark.parametrize(
"model_context_class",
[
UnboundedChatCompletionContext(),
BufferedChatCompletionContext(buffer_size=5),
TokenLimitedChatCompletionContext(model_client=ReplayChatCompletionClient([]), token_limit=5),
HeadAndTailChatCompletionContext(head_size=3, tail_size=2),
],
)
def test_serialize_and_deserialize_model_context_on_assistant_agent(model_context_class: ChatCompletionContext) -> None:
"""Test the serialization and deserialization of the message context on the AssistantAgent."""
agent = AssistantAgent(
name="assistant",
model_client=ReplayChatCompletionClient([]),
description="An assistant agent.",
model_context=model_context_class,
)

# Serialize the agent
serialized_agent = agent.dump_component()
# Deserialize the agent
deserialized_agent = AssistantAgent.load_component(serialized_agent)

# Check that the deserialized agent has the same model context as the original agent
original_model_context = agent.model_context
deserialized_model_context = deserialized_agent.model_context

assert isinstance(original_model_context, type(deserialized_model_context))
assert isinstance(deserialized_model_context, type(original_model_context))
assert original_model_context.dump_component() == deserialized_model_context.dump_component()


@pytest.mark.parametrize(
"model_context_class",
[
UnboundedChatCompletionContext(),
BufferedChatCompletionContext(buffer_size=5),
TokenLimitedChatCompletionContext(model_client=ReplayChatCompletionClient([]), token_limit=5),
HeadAndTailChatCompletionContext(head_size=3, tail_size=2),
],
)
def test_serialize_and_deserialize_model_context_on_society_of_mind_agent(
model_context_class: ChatCompletionContext,
) -> None:
"""Test the serialization and deserialization of the message context on the AssistantAgent."""
agent1 = AssistantAgent(
name="assistant1", model_client=ReplayChatCompletionClient([]), description="An assistant agent."
)
agent2 = AssistantAgent(
name="assistant2", model_client=ReplayChatCompletionClient([]), description="An assistant agent."
)
team = RoundRobinGroupChat(
participants=[agent1, agent2],
)
agent = SocietyOfMindAgent(
name="assistant",
model_client=ReplayChatCompletionClient([]),
description="An assistant agent.",
team=team,
model_context=model_context_class,
)

# Serialize the agent
serialized_agent = agent.dump_component()
# Deserialize the agent
deserialized_agent = SocietyOfMindAgent.load_component(serialized_agent)

# Check that the deserialized agent has the same model context as the original agent
original_model_context = agent.model_context
deserialized_model_context = deserialized_agent.model_context

assert isinstance(original_model_context, type(deserialized_model_context))
assert isinstance(deserialized_model_context, type(original_model_context))
assert original_model_context.dump_component() == deserialized_model_context.dump_component()


@pytest.mark.parametrize(
"model_context_class",
[
UnboundedChatCompletionContext(),
BufferedChatCompletionContext(buffer_size=5),
TokenLimitedChatCompletionContext(model_client=ReplayChatCompletionClient([]), token_limit=5),
HeadAndTailChatCompletionContext(head_size=3, tail_size=2),
],
)
def test_serialize_and_deserialize_model_context_on_code_executor_agent(
model_context_class: ChatCompletionContext,
) -> None:
"""Test the serialization and deserialization of the message context on the AssistantAgent."""
agent = CodeExecutorAgent(
name="assistant",
code_executor=LocalCommandLineCodeExecutor(),
description="An assistant agent.",
model_context=model_context_class,
)

# Serialize the agent
serialized_agent = agent.dump_component()
# Deserialize the agent
deserialized_agent = CodeExecutorAgent.load_component(serialized_agent)

# Check that the deserialized agent has the same model context as the original agent
original_model_context = agent.model_context
deserialized_model_context = deserialized_agent.model_context

assert isinstance(original_model_context, type(deserialized_model_context))
assert isinstance(deserialized_model_context, type(original_model_context))
assert original_model_context.dump_component() == deserialized_model_context.dump_component()