Skip to content

Commit 6623eff

Browse files
authored
feat: add get_logs tool for pipeline resource (#64)
* feat: add formatting utility for pipeline logs
* feat: add PipelineLogList import for formatting utils
* feat: add get_pipeline_logs tool function
* feat: import get_pipeline_logs_tool
* feat: add get_pipeline_logs MCP tool
* test: add import for get_pipeline_logs function
* test: fix imports for pipeline models
* test: add logs response and exception to FakePipelineResource
* test: implement get_logs method in FakePipelineResource
* test: add comprehensive tests for get_pipeline_logs function
* test: add comprehensive tests for formatting utils
* fix: format and lint
1 parent afb0118 commit 6623eff

5 files changed

Lines changed: 372 additions & 2 deletions

File tree

src/deepset_mcp/main.py

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
from deepset_mcp.tools.pipeline import (
2121
create_pipeline as create_pipeline_tool,
2222
get_pipeline as get_pipeline_tool,
23+
get_pipeline_logs as get_pipeline_logs_tool,
2324
list_pipelines as list_pipelines_tool,
2425
update_pipeline as update_pipeline_tool,
2526
validate_pipeline as validate_pipeline_tool,
@@ -296,6 +297,29 @@ async def update_index(
296297
return response
297298

298299

300+
@mcp.tool()
async def get_pipeline_logs(pipeline_name: str, limit: int = 30, level: str | None = None) -> str:
    """Fetches logs for a specific pipeline in the deepset workspace.

    Use this to debug pipeline issues, monitor pipeline execution, or understand what happened during pipeline runs.
    The logs provide detailed information about pipeline operations, errors, and warnings.

    :param pipeline_name: Name of the pipeline to fetch logs for.
    :param limit: Maximum number of log entries to return (default: 30, max: 100).
    :param level: Filter logs by level. Valid values: 'info', 'warning', 'error'. If not specified, returns all levels.
    """
    # Delegate to the tool implementation with a short-lived client; the
    # formatted log string is returned to the MCP caller unchanged.
    async with AsyncDeepsetClient() as client:
        return await get_pipeline_logs_tool(
            client=client,
            workspace=get_workspace(),
            pipeline_name=pipeline_name,
            limit=limit,
            level=level,
        )
321+
322+
299323
#
300324
#
301325
# @mcp.tool()

src/deepset_mcp/tools/formatting_utils.py

Lines changed: 57 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
from deepset_mcp.api.pipeline.models import DeepsetPipeline, PipelineValidationResult
1+
from deepset_mcp.api.pipeline.models import DeepsetPipeline, PipelineLogList, PipelineValidationResult
22
from deepset_mcp.api.pipeline_template.models import PipelineTemplate
33

44

@@ -85,3 +85,59 @@ def validation_result_to_llm_readable_string(validation_result: PipelineValidati
8585
result_parts.append(f"Error {i}\n- Code: {error.code}\n- Message: {error.message}\n")
8686

8787
return "\n".join(result_parts)
88+
89+
90+
def pipeline_logs_to_llm_readable_string(logs: PipelineLogList, pipeline_name: str, level: str | None = None) -> str:
91+
"""Creates a string representation of pipeline logs that is readable by LLMs.
92+
93+
:param logs: The PipelineLogList containing log entries.
94+
:param pipeline_name: The name of the pipeline the logs are from.
95+
:param level: The log level filter that was applied, if any.
96+
97+
:returns: A formatted string representation of the logs.
98+
"""
99+
if not logs.data:
100+
filter_info = f" (filtered by level: {level})" if level else ""
101+
return f"No logs found for pipeline '{pipeline_name}'{filter_info}."
102+
103+
log_parts = [f"### Logs for Pipeline '{pipeline_name}'"]
104+
105+
if level:
106+
log_parts.append(f"**Filter Applied:** Level = {level}")
107+
108+
log_parts.extend(
109+
[
110+
f"**Total Logs:** {logs.total}",
111+
f"**Showing:** {len(logs.data)} entries",
112+
f"**Has More:** {'Yes' if logs.has_more else 'No'}",
113+
"\n---\n",
114+
]
115+
)
116+
117+
for i, log in enumerate(logs.data, 1):
118+
log_entry = [
119+
f"**Log Entry {i}**",
120+
f"- **Timestamp:** {log.logged_at.strftime('%B %d, %Y %I:%M:%S %p')}",
121+
f"- **Level:** {log.level}",
122+
f"- **Origin:** {log.origin}",
123+
f"- **Message:** {log.message}",
124+
]
125+
126+
if log.exceptions:
127+
log_entry.append(f"- **Exceptions:** {log.exceptions}")
128+
129+
if log.extra_fields:
130+
log_entry.append("- **Extra Fields:")
131+
for key, value in log.extra_fields.items():
132+
log_entry.append(f" - {key}: {value}")
133+
134+
log_parts.append("\n".join(log_entry))
135+
136+
# Add separator between log entries (except for the last one)
137+
if i < len(logs.data):
138+
log_parts.append("")
139+
140+
if logs.has_more:
141+
log_parts.append("\n*Note: There are more log entries available. Adjust the limit parameter to see more.*")
142+
143+
return "\n".join(log_parts)

src/deepset_mcp/tools/pipeline.py

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -109,3 +109,39 @@ async def update_pipeline(
109109
return f"Failed to update the pipeline '{pipeline_name}': {e}"
110110

111111
return f"The pipeline '{pipeline_name}' was successfully updated."
112+
113+
114+
async def get_pipeline_logs(
    client: AsyncClientProtocol,
    workspace: str,
    pipeline_name: str,
    limit: int = 30,
    level: str | None = None,
) -> str:
    """Fetches logs for a specific pipeline.

    Retrieves log entries for the specified pipeline, with optional filtering by log level.
    This is useful for debugging pipeline issues or monitoring pipeline execution.

    :param client: The async client for API communication.
    :param workspace: The workspace name.
    :param pipeline_name: Name of the pipeline to fetch logs for.
    :param limit: Maximum number of log entries to return (default: 30).
    :param level: Filter logs by level (info, warning, error). If None, returns all levels.

    :returns: A formatted string containing the pipeline logs, or an error message.
    """
    try:
        logs = await client.pipelines(workspace=workspace).get_logs(
            pipeline_name=pipeline_name, limit=limit, level=level
        )
    except ResourceNotFoundError:
        return f"There is no pipeline named '{pipeline_name}' in workspace '{workspace}'."
    except (BadRequestError, UnexpectedAPIError) as e:
        # Both API errors were previously handled in two identical except
        # clauses; they produce the same message, so handle them together.
        return f"Failed to fetch logs for pipeline '{pipeline_name}': {e}"

    # NOTE(review): imported locally, presumably to avoid a circular import
    # between the tools modules — confirm; otherwise hoist to module level.
    from deepset_mcp.tools.formatting_utils import pipeline_logs_to_llm_readable_string

    return pipeline_logs_to_llm_readable_string(logs, pipeline_name, level)
Lines changed: 154 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,154 @@
1+
from datetime import datetime
2+
3+
from deepset_mcp.api.pipeline.models import (
4+
PipelineLog,
5+
PipelineLogList,
6+
PipelineValidationResult,
7+
ValidationError,
8+
)
9+
from deepset_mcp.tools.formatting_utils import (
10+
pipeline_logs_to_llm_readable_string,
11+
validation_result_to_llm_readable_string,
12+
)
13+
14+
15+
def test_pipeline_logs_to_llm_readable_string_with_logs() -> None:
    """Test formatting pipeline logs with data."""
    info_log = PipelineLog(
        log_id="log1",
        message="Pipeline started successfully",
        logged_at=datetime(2023, 1, 1, 12, 0, 0),
        level="info",
        origin="querypipeline",
        exceptions=None,
        extra_fields={},
    )
    error_log = PipelineLog(
        log_id="log2",
        message="Error processing document",
        logged_at=datetime(2023, 1, 1, 12, 1, 30),
        level="error",
        origin="querypipeline",
        exceptions="ValueError: Invalid document format",
        extra_fields={"component": "document_reader", "file_name": "test.pdf"},
    )
    log_list = PipelineLogList(data=[info_log, error_log], has_more=True, total=5)

    result = pipeline_logs_to_llm_readable_string(log_list, "test-pipeline")

    expected_snippets = [
        # Header and summary
        "### Logs for Pipeline 'test-pipeline'",
        "**Total Logs:** 5",
        "**Showing:** 2 entries",
        "**Has More:** Yes",
        # First entry
        "**Log Entry 1**",
        "**Timestamp:** January 01, 2023 12:00:00 PM",
        "**Level:** info",
        "**Origin:** querypipeline",
        "**Message:** Pipeline started successfully",
        # Second entry
        "**Log Entry 2**",
        "**Timestamp:** January 01, 2023 12:01:30 PM",
        "**Level:** error",
        "**Message:** Error processing document",
        "**Exceptions:** ValueError: Invalid document format",
        "component: document_reader",
        "file_name: test.pdf",
        # Pagination note
        "*Note: There are more log entries available. Adjust the limit parameter to see more.*",
    ]
    for snippet in expected_snippets:
        assert snippet in result
64+
65+
66+
def test_pipeline_logs_to_llm_readable_string_empty() -> None:
    """Test formatting empty pipeline logs."""
    empty_logs = PipelineLogList(data=[], has_more=False, total=0)

    assert (
        pipeline_logs_to_llm_readable_string(empty_logs, "empty-pipeline")
        == "No logs found for pipeline 'empty-pipeline'."
    )
73+
74+
75+
def test_pipeline_logs_to_llm_readable_string_empty_with_filter() -> None:
    """Test formatting empty pipeline logs with level filter."""
    empty_logs = PipelineLogList(data=[], has_more=False, total=0)

    formatted = pipeline_logs_to_llm_readable_string(empty_logs, "test-pipeline", level="error")

    assert formatted == "No logs found for pipeline 'test-pipeline' (filtered by level: error)."
82+
83+
84+
def test_pipeline_logs_to_llm_readable_string_with_level_filter() -> None:
    """Test formatting pipeline logs with level filter applied."""
    error_log = PipelineLog(
        log_id="log1",
        message="Critical error",
        logged_at=datetime(2023, 1, 1, 12, 0, 0),
        level="error",
        origin="querypipeline",
        exceptions=None,
        extra_fields={},
    )
    log_list = PipelineLogList(data=[error_log], has_more=False, total=1)

    result = pipeline_logs_to_llm_readable_string(log_list, "test-pipeline", level="error")

    for snippet in (
        "### Logs for Pipeline 'test-pipeline'",
        "**Filter Applied:** Level = error",
        "**Total Logs:** 1",
        "**Showing:** 1 entries",
        "**Has More:** No",
        "**Message:** Critical error",
    ):
        assert snippet in result
106+
107+
108+
def test_pipeline_logs_to_llm_readable_string_no_pagination_note() -> None:
    """Test that pagination note is not shown when has_more is False."""
    single_log = PipelineLog(
        log_id="log1",
        message="Test message",
        logged_at=datetime(2023, 1, 1, 12, 0, 0),
        level="info",
        origin="querypipeline",
        exceptions=None,
        extra_fields={},
    )

    formatted = pipeline_logs_to_llm_readable_string(
        PipelineLogList(data=[single_log], has_more=False, total=1), "test-pipeline"
    )

    # The pagination hint must only appear when more entries are available.
    assert "*Note: There are more log entries available" not in formatted
125+
126+
127+
def test_validation_result_to_llm_readable_string_valid() -> None:
    """Test formatting valid validation result."""
    valid_result = PipelineValidationResult(valid=True, errors=[])

    formatted = validation_result_to_llm_readable_string(valid_result)

    assert "The provided pipeline configuration is valid." in formatted
    # A valid result must not carry an error section.
    assert "Validation Errors" not in formatted
135+
136+
137+
def test_validation_result_to_llm_readable_string_invalid() -> None:
    """Test formatting invalid validation result."""
    expected_errors = [
        ("YAML_ERROR", "Syntax error in YAML"),
        ("COMPONENT_ERROR", "Unknown component type"),
    ]
    invalid_result = PipelineValidationResult(
        valid=False,
        errors=[ValidationError(code=code, message=message) for code, message in expected_errors],
    )

    formatted = validation_result_to_llm_readable_string(invalid_result)

    assert "The provided pipeline configuration is invalid." in formatted
    assert "**Validation Errors**" in formatted
    for idx, (code, message) in enumerate(expected_errors, start=1):
        assert f"Error {idx}" in formatted
        assert f"- Code: {code}" in formatted
        assert f"- Message: {message}" in formatted

0 commit comments

Comments
 (0)