Skip to content

Commit c3f2da2

Browse files
committed
feat: Introduce ManagedAgent and AgentRunner implementations
feat: Add OpenAIAgentRunner with agentic tool-calling loop
feat: Add LangChainAgentRunner with agentic tool-calling loop
feat: Add OpenAIRunnerFactory.create_agent(config, tools) -> OpenAIAgentRunner
feat: Add LangChainRunnerFactory.create_agent(config, tools) -> LangChainAgentRunner
feat: Add ManagedAgent wrapper holding AgentRunner and LDAIConfigTracker
feat: Add LDAIClient.create_agent() returning ManagedAgent
1 parent e2df180 commit c3f2da2

File tree

12 files changed

+873
-1
lines changed

12 files changed

+873
-1
lines changed

packages/ai-providers/server-ai-langchain/src/ldai_langchain/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
from ldai_langchain.langchain_agent_runner import LangChainAgentRunner
12
from ldai_langchain.langchain_helper import LangChainHelper
23
from ldai_langchain.langchain_model_runner import LangChainModelRunner
34
from ldai_langchain.langchain_runner_factory import LangChainRunnerFactory
@@ -9,4 +10,5 @@
910
'LangChainRunnerFactory',
1011
'LangChainHelper',
1112
'LangChainModelRunner',
13+
'LangChainAgentRunner',
1214
]
Lines changed: 122 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,122 @@
1+
"""LangChain agent runner for LaunchDarkly AI SDK."""
2+
3+
from typing import Any, Dict, List
4+
5+
from ldai import log
6+
from ldai.providers.types import LDAIMetrics
7+
from ldai.runners.agent_runner import AgentRunner
8+
from ldai.runners.types import AgentResult, ToolRegistry
9+
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage, ToolMessage
10+
11+
from ldai_langchain.langchain_helper import LangChainHelper
12+
13+
14+
class LangChainAgentRunner(AgentRunner):
    """
    AgentRunner implementation for LangChain.

    Executes a single-agent loop using a LangChain BaseChatModel with tool calling.
    Returned by LangChainRunnerFactory.create_agent(config, tools).
    """

    def __init__(
        self,
        llm: Any,
        instructions: str,
        tool_definitions: List[Dict[str, Any]],
        tools: ToolRegistry,
    ):
        # Underlying LangChain chat model (duck-typed; must provide ainvoke
        # and, when tool definitions are present, bind_tools).
        self._llm = llm
        # System prompt prepended to every run; empty string means "none".
        self._instructions = instructions
        # LD tool definitions, converted on demand to OpenAI function format.
        self._tool_definitions = tool_definitions
        # Registry mapping tool name -> callable (sync or async).
        self._tools = tools

    async def run(self, input: Any) -> AgentResult:
        """
        Run the agent with the given input string.

        Executes an agentic loop: calls the model, handles tool calls,
        and continues until the model produces a final response.

        :param input: The user prompt or input to the agent
        :return: AgentResult with output, raw response, and aggregated metrics
        """
        messages: List[BaseMessage] = []
        if self._instructions:
            messages.append(SystemMessage(content=self._instructions))
        messages.append(HumanMessage(content=str(input)))

        openai_tools = self._build_openai_tools()
        model = self._llm.bind_tools(openai_tools) if openai_tools else self._llm

        # Track the most recent raw model response so a failure result can
        # still expose whatever the model last returned.
        raw_response = None

        try:
            while True:
                response: AIMessage = await model.ainvoke(messages)
                raw_response = response
                messages.append(response)

                tool_calls = getattr(response, 'tool_calls', None)

                if not tool_calls:
                    # Final answer: the model requested no further tool work.
                    metrics = LangChainHelper.get_ai_metrics_from_response(response)
                    # Non-string content (e.g. structured content blocks) is
                    # not supported here; fall back to an empty string.
                    content = response.content if isinstance(response.content, str) else ""
                    return AgentResult(
                        output=content,
                        raw=raw_response,
                        metrics=metrics,
                    )

                # Execute tool calls and append results so the next model
                # turn can observe them.
                for tool_call in tool_calls:
                    tool_name = tool_call["name"]
                    tool_args = tool_call.get("args", {})
                    tool_id = tool_call.get("id", "")
                    result_str = await self._execute_tool(tool_name, tool_args)
                    messages.append(ToolMessage(content=result_str, tool_call_id=tool_id))

        except Exception as error:
            # Any failure (model call, malformed tool-call payload, ...)
            # yields an unsuccessful result instead of propagating.
            log.warning(f"LangChain agent run failed: {error}")
            return AgentResult(
                output="",
                raw=raw_response,
                metrics=LDAIMetrics(success=False, usage=None),
            )

    async def _execute_tool(self, tool_name: str, tool_args: Dict[str, Any]) -> str:
        """Look up and invoke one tool, returning its stringified result.

        Unknown tools and tool failures are reported back as strings so the
        model can observe the error and recover, rather than aborting the run.
        """
        import inspect

        tool_fn = self._tools.get(tool_name)
        if not tool_fn:
            log.warning(f"Tool '{tool_name}' not found in registry")
            return f"Tool '{tool_name}' not found"
        try:
            result = tool_fn(**tool_args)
            # inspect.isawaitable is the canonical check for coroutine /
            # future results returned by async tool callables.
            if inspect.isawaitable(result):
                result = await result
            return str(result)
        except Exception as error:
            log.warning(f"Tool '{tool_name}' execution failed: {error}")
            return f"Tool execution failed: {error}"

    def _build_openai_tools(self) -> List[Dict[str, Any]]:
        """Convert LD tool definitions to OpenAI function-calling format for bind_tools."""
        tools = []
        for td in self._tool_definitions:
            if not isinstance(td, dict):
                continue
            if "type" in td:
                # Already in OpenAI tool format; pass through untouched.
                tools.append(td)
            elif "name" in td:
                tools.append({
                    "type": "function",
                    "function": {
                        "name": td["name"],
                        "description": td.get("description", ""),
                        "parameters": td.get("parameters", {"type": "object", "properties": {}}),
                    },
                })
        return tools

    def get_llm(self) -> Any:
        """Return the underlying LangChain LLM."""
        return self._llm

packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
from typing import Any
2+
13
from ldai.models import AIConfigKind
24
from ldai.providers import AIProvider
35

@@ -17,3 +19,22 @@ def create_model(self, config: AIConfigKind) -> LangChainModelRunner:
1719
"""
1820
llm = LangChainHelper.create_langchain_model(config)
1921
return LangChainModelRunner(llm)
22+
23+
def create_agent(self, config: Any, tools: Any) -> 'LangChainAgentRunner':
    """
    Create a configured LangChainAgentRunner for the given AI agent config.

    Tool definitions are read from the model parameters under the 'tools'
    key; instructions come from the config's `instructions` attribute when
    present.

    :param config: The LaunchDarkly AI agent configuration
    :param tools: ToolRegistry mapping tool names to callables
    :return: LangChainAgentRunner ready to run the agent
    """
    # Imported lazily to avoid a circular import with the runner module.
    from ldai_langchain.langchain_agent_runner import LangChainAgentRunner

    config_dict = config.to_dict()
    model_dict = config_dict.get('model') or {}
    parameters = model_dict.get('parameters') or {}
    # Read directly instead of pop() on a throwaway dict copy, which had
    # no effect on the config and only looked like it stripped the tools.
    tool_definitions = parameters.get('tools') or []
    # getattr replaces the precedence-sensitive
    # `config.instructions or '' if hasattr(...) else ''` chain; it still
    # tolerates configs without an `instructions` attribute and coerces
    # falsy values (None) to ''.
    instructions = getattr(config, 'instructions', '') or ''

    llm = LangChainHelper.create_langchain_model(config)
    return LangChainAgentRunner(llm, instructions, tool_definitions, tools or {})

packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py

Lines changed: 122 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -246,3 +246,125 @@ def test_returns_underlying_llm(self):
246246
runner = LangChainModelRunner(mock_llm)
247247

248248
assert runner.get_llm() is mock_llm
249+
250+
251+
class TestCreateAgent:
    """Tests for LangChainRunnerFactory.create_agent."""

    def test_creates_agent_runner_with_instructions_and_tool_definitions(self):
        """Should create LangChainAgentRunner with instructions and tool definitions."""
        from unittest.mock import patch
        from ldai_langchain import LangChainAgentRunner

        weather_tool = {'name': 'get-weather', 'description': 'Get weather', 'parameters': {}}
        agent_config = MagicMock()
        agent_config.instructions = "You are a helpful assistant."
        agent_config.to_dict.return_value = {
            'model': {
                'name': 'gpt-4',
                'parameters': {'tools': [weather_tool]},
            },
            'provider': {'name': 'openai'},
        }

        # The LLM itself is irrelevant here; patch creation to a stub.
        with patch.object(LangChainHelper, 'create_langchain_model', return_value=MagicMock()):
            runner = LangChainRunnerFactory().create_agent(
                agent_config, {'get-weather': lambda loc: 'sunny'}
            )

        assert isinstance(runner, LangChainAgentRunner)
        assert runner._instructions == "You are a helpful assistant."
        assert len(runner._tool_definitions) == 1

    def test_creates_agent_runner_with_no_tools(self):
        """Should create LangChainAgentRunner with no tool definitions."""
        from unittest.mock import patch
        from ldai_langchain import LangChainAgentRunner

        agent_config = MagicMock()
        agent_config.instructions = "You are a helpful assistant."
        agent_config.to_dict.return_value = {
            'model': {'name': 'gpt-4', 'parameters': {}},
            'provider': {'name': 'openai'},
        }

        with patch.object(LangChainHelper, 'create_langchain_model', return_value=MagicMock()):
            runner = LangChainRunnerFactory().create_agent(agent_config, {})

        assert isinstance(runner, LangChainAgentRunner)
        assert runner._tool_definitions == []
305+
306+
class TestLangChainAgentRunner:
    """Tests for LangChainAgentRunner.run."""

    @pytest.mark.asyncio
    async def test_runs_agent_and_returns_result_with_no_tool_calls(self):
        """Should return AgentResult when model responds with no tool calls."""
        from ldai_langchain import LangChainAgentRunner
        from langchain_core.messages import AIMessage

        mock_llm = MagicMock()
        # An AIMessage with no tool_calls terminates the agent loop on the
        # first iteration.
        mock_response = AIMessage(content="The answer is 42.")
        mock_llm.bind_tools = MagicMock(return_value=mock_llm)
        mock_llm.ainvoke = AsyncMock(return_value=mock_response)

        runner = LangChainAgentRunner(mock_llm, "You are helpful.", [], {})
        result = await runner.run("What is the answer?")

        assert result.output == "The answer is 42."
        assert result.metrics.success is True

    @pytest.mark.asyncio
    async def test_executes_tool_calls_and_returns_final_response(self):
        """Should execute tool calls and continue loop until final response."""
        from ldai_langchain import LangChainAgentRunner
        from langchain_core.messages import AIMessage

        # First response: has a tool call
        first_response = AIMessage(content="")
        first_response.tool_calls = [
            {"name": "get-weather", "args": {"location": "Paris"}, "id": "call_123"}
        ]

        # Second response: final answer
        second_response = AIMessage(content="It is sunny in Paris.")

        mock_llm = MagicMock()
        mock_llm.bind_tools = MagicMock(return_value=mock_llm)
        # side_effect order pins the two loop iterations: tool round, then
        # the terminating answer.
        mock_llm.ainvoke = AsyncMock(side_effect=[first_response, second_response])

        weather_fn = MagicMock(return_value="Sunny, 25°C")
        runner = LangChainAgentRunner(
            mock_llm, "You are helpful.",
            [{'name': 'get-weather', 'description': 'Get weather', 'parameters': {}}],
            {'get-weather': weather_fn},
        )
        result = await runner.run("What is the weather in Paris?")

        assert result.output == "It is sunny in Paris."
        assert result.metrics.success is True
        # The tool must be invoked with the model-supplied kwargs, exactly once.
        weather_fn.assert_called_once_with(location="Paris")

    @pytest.mark.asyncio
    async def test_returns_failure_when_exception_thrown(self):
        """Should return unsuccessful AgentResult when exception is thrown."""
        from ldai_langchain import LangChainAgentRunner

        mock_llm = MagicMock()
        mock_llm.bind_tools = MagicMock(return_value=mock_llm)
        # Model failure path: run() must swallow the exception and report
        # an unsuccessful result instead of raising.
        mock_llm.ainvoke = AsyncMock(side_effect=Exception("LLM Error"))

        runner = LangChainAgentRunner(mock_llm, "", [], {})
        result = await runner.run("Hello")

        assert result.output == ""
        assert result.metrics.success is False

packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
from ldai_openai.openai_agent_runner import OpenAIAgentRunner
12
from ldai_openai.openai_helper import OpenAIHelper
23
from ldai_openai.openai_model_runner import OpenAIModelRunner
34
from ldai_openai.openai_runner_factory import OpenAIRunnerFactory
@@ -6,4 +7,5 @@
67
'OpenAIRunnerFactory',
78
'OpenAIHelper',
89
'OpenAIModelRunner',
10+
'OpenAIAgentRunner',
911
]

0 commit comments

Comments
 (0)