@@ -0,0 +1,89 @@
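# Example: multi-agent BeeAI workflow auto-instrumented with OpenInference, exporting traces to Arize.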
import asyncio
import os

from arize.otel import register
from beeai_framework.adapters.openai import OpenAIChatModel
from beeai_framework.agents.experimental import RequirementAgent
from beeai_framework.agents.experimental.requirements.conditional import (
    ConditionalRequirement,
)
from beeai_framework.errors import FrameworkError
from beeai_framework.middleware.trajectory import GlobalTrajectoryMiddleware
from beeai_framework.tools import Tool
from beeai_framework.tools.handoff import HandoffTool
from beeai_framework.tools.search.wikipedia import WikipediaTool
from beeai_framework.tools.think import ThinkTool
from beeai_framework.tools.weather import OpenMeteoTool
from dotenv import load_dotenv

from openinference.instrumentation.beeai import BeeAIInstrumentor

load_dotenv()

# Arize credentials and the OpenAI key are read from the environment (via .env)
arize_space_id = os.getenv("ARIZE_SPACE_ID")
arize_api_key = os.getenv("ARIZE_API_KEY")
openai_api_key = os.getenv("OPENAI_API_KEY")

# Register an OpenTelemetry tracer provider that exports spans to Arize
tracer_provider = register(
    space_id=arize_space_id,
    api_key=arize_api_key,
    project_name="beeai-cookbook",
)

# Auto-instrument BeeAI so agent, tool, and LLM calls emit OpenInference spans
BeeAIInstrumentor().instrument(tracer_provider=tracer_provider)


async def main() -> None:
    knowledge_agent = RequirementAgent(
        llm=OpenAIChatModel(model="gpt-4o-mini"),
        # llm=ChatModel.from_name("ollama:granite3.3:8b"),
        tools=[ThinkTool(), WikipediaTool()],
        requirements=[ConditionalRequirement(ThinkTool, force_at_step=1)],
        role="Knowledge Specialist",
        instructions="Provide answers to general questions about the world.",
    )

    weather_agent = RequirementAgent(
        llm=OpenAIChatModel(model="gpt-4o-mini"),
        tools=[OpenMeteoTool()],
        role="Weather Specialist",
        instructions="Provide weather forecast for a given destination.",
    )

    main_agent = RequirementAgent(
        name="MainAgent",
        llm=OpenAIChatModel(model="gpt-4o-mini"),
        # llm=ChatModel.from_name("ollama:granite3.3:8b"),
        tools=[
            ThinkTool(),
            HandoffTool(
                knowledge_agent,
                name="KnowledgeLookup",
                description="Consult the Knowledge Agent for general questions.",
            ),
            HandoffTool(
                weather_agent,
                name="WeatherLookup",
                description="Consult the Weather Agent for forecasts.",
            ),
        ],
        requirements=[ConditionalRequirement(ThinkTool, force_at_step=1)],
        # Log all tool calls to the console for easier debugging
        middlewares=[GlobalTrajectoryMiddleware(included=[Tool])],
    )

    question = (
        "If I travel to Rome next weekend, what should I expect in terms of weather, "
        "and also tell me one famous historical landmark there?"
    )
    print(f"User: {question}")

    try:
        response = await main_agent.run(question, expected_output="Helpful and clear response.")
        print("Agent:", response)
    except FrameworkError as err:
        print("Error:", err.explain())


if __name__ == "__main__":
    asyncio.run(main())
@@ -0,0 +1,85 @@
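# Example: the same multi-agent BeeAI workflow, exporting traces to a local OTLP endpoint (e.g. Phoenix).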
import asyncio

from beeai_framework.adapters.openai import OpenAIChatModel
from beeai_framework.agents.experimental import RequirementAgent
from beeai_framework.agents.experimental.requirements.conditional import (
    ConditionalRequirement,
)
from beeai_framework.errors import FrameworkError
from beeai_framework.middleware.trajectory import GlobalTrajectoryMiddleware
from beeai_framework.tools import Tool
from beeai_framework.tools.handoff import HandoffTool
from beeai_framework.tools.search.wikipedia import WikipediaTool
from beeai_framework.tools.think import ThinkTool
from beeai_framework.tools.weather import OpenMeteoTool
from dotenv import load_dotenv
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk import trace as trace_sdk
from opentelemetry.sdk.trace.export import SimpleSpanProcessor

from openinference.instrumentation.beeai import BeeAIInstrumentor

load_dotenv()

# Phoenix's default OTLP/HTTP trace endpoint
endpoint = "http://localhost:6006/v1/traces"

tracer_provider = trace_sdk.TracerProvider()
tracer_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint)))

BeeAIInstrumentor().instrument(tracer_provider=tracer_provider)


async def main() -> None:
    knowledge_agent = RequirementAgent(
        llm=OpenAIChatModel(model="gpt-4o-mini"),
        # llm=ChatModel.from_name("ollama:granite3.3:8b"),
        tools=[ThinkTool(), WikipediaTool()],
        requirements=[ConditionalRequirement(ThinkTool, force_at_step=1)],
        role="Knowledge Specialist",
        instructions="Provide answers to general questions about the world.",
    )

    weather_agent = RequirementAgent(
        llm=OpenAIChatModel(model="gpt-4o-mini"),
        tools=[OpenMeteoTool()],
        role="Weather Specialist",
        instructions="Provide weather forecast for a given destination.",
    )

    main_agent = RequirementAgent(
        name="MainAgent",
        llm=OpenAIChatModel(model="gpt-4o-mini"),
        # llm=ChatModel.from_name("ollama:granite3.3:8b"),
        tools=[
            ThinkTool(),
            HandoffTool(
                knowledge_agent,
                name="KnowledgeLookup",
                description="Consult the Knowledge Agent for general questions.",
            ),
            HandoffTool(
                weather_agent,
                name="WeatherLookup",
                description="Consult the Weather Agent for forecasts.",
            ),
        ],
        requirements=[ConditionalRequirement(ThinkTool, force_at_step=1)],
        # Log all tool calls to the console for easier debugging
        middlewares=[GlobalTrajectoryMiddleware(included=[Tool])],
    )

    question = (
        "If I travel to Rome next weekend, what should I expect in terms of weather, "
        "and also tell me one famous historical landmark there?"
    )
    print(f"User: {question}")

    try:
        response = await main_agent.run(question, expected_output="Helpful and clear response.")
        print("Agent:", response)
    except FrameworkError as err:
        print("Error:", err.explain())


if __name__ == "__main__":
    asyncio.run(main())
@@ -39,7 +39,12 @@ instruments = [
 test = [
     "beeai-framework >= 0.1.51",
     "opentelemetry-sdk",
-    "opentelemetry-exporter-otlp"
+    "opentelemetry-exporter-otlp",
+    "pytest-recording",
+    "vcrpy>=5.0.0,<8.0.0",
+    "beeai-framework[duckduckgo]",
+    "pytest-asyncio",
+    "pytest",
 ]
 
 [project.entry-points.opentelemetry_instrumentor]
@@ -1,5 +1,5 @@
 from datetime import datetime
-from typing import Any, ClassVar
+from typing import Any, ClassVar, Dict, Generator, Tuple
 
 from beeai_framework.backend import (
     AnyMessage,
@@ -19,6 +19,7 @@
 from beeai_framework.utils.lists import remove_falsy
 from typing_extensions import override
 
+from openinference.instrumentation import safe_json_dumps
 from openinference.instrumentation.beeai._utils import (
     _unpack_object,
     safe_dump_model_schema,
@@ -36,6 +37,24 @@
 )
 
 
+def get_tool_parameters(tool: AnyTool) -> Dict[str, Any]:
+    tool_dict = tool.to_json_safe()
+    if "input_schema" in tool_dict:
+        input_schema = tool_dict.pop("input_schema")
+        tool_dict["parameters"] = input_schema or {}
+        tool_dict["parameters"]["type"] = "object"
+    return tool_dict
+
+
+def get_tools(tools: list[AnyTool]) -> Generator[Tuple[str, str], None, None]:
+    for index, tool in enumerate(tools):
+        function = {"type": "function", "function": get_tool_parameters(tool)}
+        yield (
+            f"{SpanAttributes.LLM_TOOLS}.{index}.{ToolAttributes.TOOL_JSON_SCHEMA}",
+            safe_json_dumps(function),
+        )
+
+
 class ChatModelProcessor(Processor):
     kind: ClassVar[OpenInferenceSpanKindValues] = OpenInferenceSpanKindValues.LLM
 
@@ -80,7 +99,7 @@ async def update(
         )
         self.span.set_attributes(
             {
-                SpanAttributes.LLM_TOOLS: [t.name for t in (event.input.tools or [])],
+                **dict(get_tools(event.input.tools or [])),
                 SpanAttributes.LLM_INVOCATION_PARAMETERS: stringify(
                     meta.creator.parameters.model_dump(
                         exclude_none=True, exclude_unset=True
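For context, a minimal sketch (not part of the diff) of the flattened span attributes that get_tools yields, assuming the OpenInference convention values "llm.tools" and "tool.json_schema" and a hypothetical tool schema:

import json

# Hypothetical tool definition, for illustration only
wiki_tool = {
    "name": "Wikipedia",
    "description": "Search Wikipedia.",
    "parameters": {"type": "object", "properties": {"query": {"type": "string"}}},
}

# One attribute per tool: llm.tools.<index>.tool.json_schema -> OpenAI-style function JSON
attributes = {
    "llm.tools.0.tool.json_schema": json.dumps({"type": "function", "function": wiki_tool}),
}

Flattening each tool into its own string-valued attribute preserves the full JSON schema (not just a list of tool names, as before), while respecting OpenTelemetry's restriction that span attribute values be primitives or homogeneous arrays.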