Skip to content
Open
Show file tree
Hide file tree
Changes from 7 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/workflows/build.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ on:
branches:
- feature/*
- feat/*
- chore/*
- bugfix/*
- hotfix/*
- fix/*
Expand Down
6 changes: 4 additions & 2 deletions dapr_agents/types/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,13 +70,15 @@ class AgentActorState(BaseModel):
"""Represents the state of an agent, tracking message history, task history, and overall status."""

messages: Optional[List[AgentActorMessage]] = Field(
default_factory=list, description="History of messages exchanged by the agent"
default_factory=list[AgentActorMessage],
description="History of messages exchanged by the agent",
)
message_count: int = Field(
0, description="Total number of messages exchanged by the agent"
)
task_history: Optional[List[AgentTaskEntry]] = Field(
default_factory=list, description="History of tasks the agent has performed"
default_factory=list[AgentTaskEntry],
description="History of tasks the agent has performed",
)
overall_status: AgentStatus = Field(
AgentStatus.IDLE, description="Current operational status of the agent"
Expand Down
6 changes: 3 additions & 3 deletions dapr_agents/types/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,7 @@ class OpenAIModelConfig(OpenAIClientConfig):
type: Literal["openai"] = Field(
"openai", description="Type of the model, must always be 'openai'"
)
name: str = Field(default=None, description="Name of the OpenAI model")
name: str = Field(default="", description="Name of the OpenAI model")


class AzureOpenAIModelConfig(AzureOpenAIClientConfig):
Expand All @@ -157,7 +157,7 @@ class HFHubModelConfig(HFInferenceClientConfig):
"huggingface", description="Type of the model, must always be 'huggingface'"
)
name: str = Field(
default=None, description="Name of the model available through Hugging Face"
default="", description="Name of the model available through Hugging Face"
)


Expand All @@ -166,7 +166,7 @@ class NVIDIAModelConfig(NVIDIAClientConfig):
"nvidia", description="Type of the model, must always be 'nvidia'"
)
name: str = Field(
default=None, description="Name of the model available through NVIDIA"
default="", description="Name of the model available through NVIDIA"
)


Expand Down
3 changes: 2 additions & 1 deletion dapr_agents/types/message.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
from typing import Any
from pydantic import (
BaseModel,
field_validator,
Expand Down Expand Up @@ -183,7 +184,7 @@ class ChatCompletion(BaseModel):
object: Optional[str] = None
usage: dict

def get_message(self) -> Optional[str]:
def get_message(self) -> Optional[Dict[str, Any]]:
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I do not think this is correct because getting Content is just a plain string right?

def get_content(self) -> Optional[str]:
        """
        Retrieve the content from the first choice's message.
        """
        message = self.get_message()
        return message.get("content") if message else None

A Response Example -> Choices -> 0 -> Message -> Content right?

{
  "id": "chatcmpl-B9MBs8CjcvOU2jLn4n570S5qMJKcT",
  "object": "chat.completion",
  "created": 1741569952,
  "model": "gpt-4.1-2025-04-14",
  "choices": [
    {
      "index": 0,
      "message": {
        "role": "assistant",
        "content": "Hello! How can I assist you today?",
        "refusal": null,
        "annotations": []
      },
      "logprobs": null,
      "finish_reason": "stop"
    }
  ],
  "usage": {
    "prompt_tokens": 19,
    "completion_tokens": 10,
    "total_tokens": 29,
    "prompt_tokens_details": {
      "cached_tokens": 0,
      "audio_tokens": 0
    },
    "completion_tokens_details": {
      "reasoning_tokens": 0,
      "audio_tokens": 0,
      "accepted_prediction_tokens": 0,
      "rejected_prediction_tokens": 0
    }
  },
  "service_tier": "default"
}

So having Optional[str] is the right thing here and not Optional[Dict[str, Any]] right?

Copy link
Contributor Author

@CasperGN CasperGN Apr 27, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@Cyb3rWard0g yes, the content from get_content returns a plain string; however, get_message returns from .model_dump(), which returns a Dict[str, Any]:

    def model_dump(
        self,
        *,
        mode: Literal['json', 'python'] | str = 'python',
        include: IncEx | None = None,
        exclude: IncEx | None = None,
        context: Any | None = None,
        by_alias: bool | None = None,
        exclude_unset: bool = False,
        exclude_defaults: bool = False,
        exclude_none: bool = False,
        round_trip: bool = False,
        warnings: bool | Literal['none', 'warn', 'error'] = True,
        fallback: Callable[[Any], Any] | None = None,
        serialize_as_any: bool = False,
    ) -> dict[str, Any]:
...

This then also ensures that when get_content is fetched (which calls get_message) the .get("content") works since it's a dict (well, unless a None was returned).

"""
Retrieve the main message content from the first choice.
"""
Expand Down
2 changes: 1 addition & 1 deletion mypy.ini
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ ignore_errors = True
ignore_errors = True

[mypy-dapr_agents.types.*]
ignore_errors = True
disable_error_code = union-attr

[mypy-dapr_agents.workflow.*]
ignore_errors = True
3 changes: 2 additions & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -14,4 +14,5 @@ pyyaml==6.0.2
rich==13.9.4
huggingface_hub==0.30.2
numpy==2.2.2
mcp==1.6.0
mcp==1.6.0
dotenv==0.9.9