Skip to content

Commit 341b5a3

Browse files
committed
Minor fixes
1 parent 7c71372 commit 341b5a3

7 files changed

Lines changed: 645 additions & 1617 deletions

File tree

agents/.DS_Store

0 Bytes
Binary file not shown.

agents/chef_analysis/agent.py

Lines changed: 77 additions & 203 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,3 @@
1-
"""
2-
Enhanced ChefAnalysisAgent with Tree-sitter Integration
3-
Logs all files received before analysis for full observability.
4-
"""
5-
61
import time
72
import uuid
83
import json
@@ -16,28 +11,31 @@
1611
from agents.chef_analysis.processor import extract_and_validate_analysis
1712
from shared.exceptions import CookbookAnalysisError
1813
from shared.log_utils import create_chef_logger, ChefAnalysisLogger
19-
2014
from shared.tree_sitter_analyzer import TreeSitterAnalyzer
2115

2216
logger = logging.getLogger(__name__)
2317

2418
class ChefAnalysisAgent:
2519
"""
26-
Enhanced ChefAnalysisAgent with Tree-sitter integration and input logging.
20+
ChefAnalysisAgent: all instructions/prompt templates are from config (no hardcoding).
2721
"""
2822

2923
def __init__(
30-
self,
31-
client: LlamaStackClient,
32-
agent_id: str,
33-
session_id: str,
34-
timeout: int = 120
24+
self,
25+
client: LlamaStackClient,
26+
agent_id: str,
27+
session_id: str,
28+
instruction: str,
29+
enhanced_prompt_template: str,
30+
timeout: int = 120,
3531
):
3632
self.client = client
3733
self.agent_id = agent_id
3834
self.session_id = session_id
3935
self.timeout = timeout
4036
self.logger = create_chef_logger("init")
37+
self.instruction = instruction
38+
self.enhanced_prompt_template = enhanced_prompt_template
4139

4240
try:
4341
self.tree_sitter = TreeSitterAnalyzer()
@@ -228,13 +226,9 @@ async def _analyze_with_enhanced_prompt(
228226
session_id: str
229227
) -> Dict[str, Any]:
230228
try:
231-
step_logger.info(f"[{correlation_id}] 🧠 Creating enhanced prompt with verified facts")
232-
if tree_sitter_facts.get('tree_sitter_enabled', False):
233-
enhanced_prompt = self._create_enhanced_analysis_prompt(cookbook_content, tree_sitter_facts)
234-
step_logger.info(f"[{correlation_id}] 📝 Using Tree-sitter enhanced prompt")
235-
else:
236-
enhanced_prompt = self._create_analysis_prompt(cookbook_content)
237-
step_logger.info(f"[{correlation_id}] 📝 Using standard prompt (Tree-sitter unavailable)")
229+
step_logger.info(f"[{correlation_id}] 🧠 Creating enhanced prompt from config")
230+
enhanced_prompt = self._create_enhanced_analysis_prompt(cookbook_content, tree_sitter_facts)
231+
step_logger.info(f"[{correlation_id}] 📝 Using enhanced prompt (from YAML config)")
238232
result = await self._analyze_direct(enhanced_prompt, correlation_id, step_logger, session_id)
239233
if result and result.get("success") and not result.get("postprocess_error"):
240234
step_logger.info(f"[{correlation_id}] LlamaStack agent analysis succeeded")
@@ -247,58 +241,46 @@ async def _analyze_with_enhanced_prompt(
247241
return self._create_intelligent_fallback_from_facts(tree_sitter_facts, correlation_id, cookbook_content)
248242

249243
def _create_enhanced_analysis_prompt(self, cookbook_content: str, tree_sitter_facts: Dict[str, Any]) -> str:
250-
resources = tree_sitter_facts['resources']
251-
deps = tree_sitter_facts['dependencies']
252-
metadata = tree_sitter_facts['metadata']
253-
summary = tree_sitter_facts['summary']
254-
return f"""You are analyzing a Chef cookbook.
255-
256-
Verified facts (from Tree-sitter):
257-
- Cookbook: {metadata.get('name', 'unknown')} v{metadata.get('version', 'unknown')}
258-
- Packages: {resources['packages']}
259-
- Services: {resources['services']}
260-
- Files: {resources['files']}
261-
- Templates: {resources['templates']}
262-
- Direct Deps: {deps['cookbook_deps']}
263-
- Wrapper Cookbooks: {deps['include_recipes']}
264-
- Complexity Score: {summary['complexity_score']}
265-
- Total Resources: {summary['total_resources']}
266-
267-
Now, return only this JSON structure filled in with accurate values based on the above:
268-
269-
{{
270-
"success": true,
271-
"version_requirements": {{
272-
"min_chef_version": "string",
273-
"min_ruby_version": "string",
274-
"migration_effort": "LOW|MEDIUM|HIGH",
275-
"estimated_hours": number
276-
}},
277-
"dependencies": {{
278-
"is_wrapper": boolean,
279-
"direct_deps": {json.dumps(deps['cookbook_deps'])},
280-
"runtime_deps": [],
281-
"circular_risk": "none|low|high"
282-
}},
283-
"functionality": {{
284-
"primary_purpose": "string",
285-
"services": {json.dumps(resources['services'])},
286-
"packages": {json.dumps(resources['packages'])},
287-
"files_managed": {json.dumps(resources['files'])}
288-
}},
289-
"recommendations": {{
290-
"consolidation_action": "REUSE|EXTEND|REWRITE",
291-
"rationale": "string",
292-
"migration_priority": "LOW|MEDIUM|HIGH"
293-
}},
294-
"complexity_level": "Low|Medium|High",
295-
"detailed_analysis": "string",
296-
"key_operations": ["string"],
297-
"configuration_details": "string",
298-
"conversion_notes": "string"
299-
}}
300-
301-
DO NOT return Markdown, explanations, or prose. Respond ONLY with this JSON object."""
244+
facts_str = json.dumps(tree_sitter_facts, indent=2)
245+
return self.enhanced_prompt_template.format(
246+
instruction=self.instruction,
247+
cookbook_content=cookbook_content,
248+
tree_sitter_facts=facts_str
249+
)
250+
251+
async def _analyze_direct(
252+
self,
253+
prompt: str,
254+
correlation_id: str,
255+
step_logger: ChefAnalysisLogger,
256+
session_id: str
257+
) -> Optional[Dict[str, Any]]:
258+
try:
259+
messages = [UserMessage(role="user", content=prompt)]
260+
generator = self.client.agents.turn.create(
261+
agent_id=self.agent_id,
262+
session_id=session_id,
263+
messages=messages,
264+
stream=True,
265+
)
266+
turn = None
267+
for chunk in generator:
268+
event = chunk.event
269+
event_type = event.payload.event_type
270+
if event_type == "turn_complete":
271+
turn = event.payload.turn
272+
break
273+
if not turn:
274+
step_logger.error("No turn completed in LlamaStack response")
275+
return None
276+
raw_response = turn.output_message.content
277+
step_logger.info(f"📥 Received LlamaStack response: {len(raw_response)} chars")
278+
result = extract_and_validate_analysis(raw_response, correlation_id, prompt[:500])
279+
step_logger.info(f"🔍 Processor result: success={result.get('success')}")
280+
return result
281+
except Exception as e:
282+
step_logger.error(f" LlamaStack analysis failed: {e}")
283+
return None
302284

303285
def _merge_analysis_results(
304286
self,
@@ -468,121 +450,6 @@ async def _analyze_with_retries(
468450
logger.warning("⚠️ LLM analysis failed - processor will handle intelligent fallback")
469451
return extract_and_validate_analysis("{}", correlation_id, cookbook_content)
470452

471-
async def _analyze_direct(
472-
self,
473-
prompt: str,
474-
correlation_id: str,
475-
step_logger: ChefAnalysisLogger,
476-
session_id: str
477-
) -> Optional[Dict[str, Any]]:
478-
try:
479-
messages = [UserMessage(role="user", content=prompt)]
480-
generator = self.client.agents.turn.create(
481-
agent_id=self.agent_id,
482-
session_id=session_id,
483-
messages=messages,
484-
stream=True,
485-
)
486-
turn = None
487-
for chunk in generator:
488-
event = chunk.event
489-
event_type = event.payload.event_type
490-
if event_type == "turn_complete":
491-
turn = event.payload.turn
492-
break
493-
if not turn:
494-
step_logger.error("No turn completed in LlamaStack response")
495-
return None
496-
raw_response = turn.output_message.content
497-
step_logger.info(f"📥 Received LlamaStack response: {len(raw_response)} chars")
498-
result = extract_and_validate_analysis(raw_response, correlation_id, prompt[:500])
499-
step_logger.info(f"🔍 Processor result: success={result.get('success')}")
500-
return result
501-
except Exception as e:
502-
step_logger.error(f" LlamaStack analysis failed: {e}")
503-
return None
504-
505-
def _create_analysis_prompt(self, cookbook_content: str) -> str:
506-
return f"""Analyze this Chef cookbook and provide a comprehensive analysis. Return ONLY valid JSON with your analysis.
507-
508-
<COOKBOOK>
509-
{cookbook_content}
510-
</COOKBOOK>
511-
512-
Please analyze the cookbook and provide the following information in JSON format:
513-
514-
1. VERSION REQUIREMENTS:
515-
- Minimum Chef version required (if determinable)
516-
- Minimum Ruby version required (if determinable)
517-
- Migration effort estimate (LOW/MEDIUM/HIGH)
518-
- Estimated migration hours
519-
- Any deprecated features found
520-
521-
2. DEPENDENCIES:
522-
- Whether this is a wrapper cookbook
523-
- List of wrapped cookbooks (from include_recipe calls)
524-
- Direct dependencies (from metadata.rb)
525-
- Runtime dependencies
526-
- Circular dependency risk assessment
527-
528-
3. FUNCTIONALITY:
529-
- Primary purpose of the cookbook
530-
- Services managed
531-
- Packages installed
532-
- Key files/directories managed
533-
- Reusability level (LOW/MEDIUM/HIGH)
534-
- Customization points
535-
536-
4. RECOMMENDATIONS:
537-
- Consolidation action (REUSE/EXTEND/RECREATE)
538-
- Detailed rationale
539-
- Migration priority (LOW/MEDIUM/HIGH/CRITICAL)
540-
- Risk factors to consider
541-
- Recommended migration steps
542-
543-
Return the analysis in this JSON structure:
544-
{{
545-
"success": true,
546-
"version_requirements": {{
547-
"min_chef_version": "version or null",
548-
"min_ruby_version": "version or null",
549-
"migration_effort": "LOW|MEDIUM|HIGH",
550-
"estimated_hours": number_or_null,
551-
"deprecated_features": ["list of deprecated features"]
552-
}},
553-
"dependencies": {{
554-
"is_wrapper": true_or_false,
555-
"wrapped_cookbooks": ["list of cookbooks"],
556-
"direct_deps": ["dependencies from metadata"],
557-
"runtime_deps": ["runtime dependencies"],
558-
"circular_risk": "none|low|medium|high"
559-
}},
560-
"functionality": {{
561-
"primary_purpose": "description",
562-
"services": ["list of services"],
563-
"packages": ["list of packages"],
564-
"files_managed": ["key files/directories"],
565-
"reusability": "LOW|MEDIUM|HIGH",
566-
"customization_points": ["customization areas"]
567-
}},
568-
"recommendations": {{
569-
"consolidation_action": "REUSE|EXTEND|RECREATE",
570-
"rationale": "detailed explanation",
571-
"migration_priority": "LOW|MEDIUM|HIGH|CRITICAL",
572-
"risk_factors": ["list of risks"],
573-
"migration_steps": ["recommended steps"]
574-
}}
575-
}}
576-
577-
CRITICAL: Return ONLY the JSON object with your actual analysis values."""
578-
579-
def _format_cookbook_content(self, cookbook_name: str, files: Dict[str, str]) -> str:
580-
content_parts = [f"Cookbook Name: {cookbook_name}"]
581-
for filename, content in files.items():
582-
content_parts.append(f"\n=== File: {filename} ===")
583-
content_parts.append(content.strip())
584-
return "\n".join(content_parts)
585-
586453
async def analyze_cookbook_stream(
587454
self,
588455
cookbook_data: Dict[str, Any],
@@ -625,6 +492,30 @@ async def analyze_cookbook_stream(
625492
"correlation_id": correlation_id
626493
}
627494

495+
def _format_cookbook_content(self, cookbook_name: str, files: Dict[str, str]) -> str:
496+
content_parts = [f"Cookbook Name: {cookbook_name}"]
497+
for filename, content in files.items():
498+
content_parts.append(f"\n=== File: {filename} ===")
499+
content_parts.append(content.strip())
500+
return "\n".join(content_parts)
501+
502+
async def health_check(self) -> bool:
503+
try:
504+
messages = [UserMessage(role="user", content="Health check - please respond with 'OK'")]
505+
generator = self.client.agents.turn.create(
506+
agent_id=self.agent_id,
507+
session_id=self.session_id,
508+
messages=messages,
509+
stream=True,
510+
)
511+
for chunk in generator:
512+
break
513+
self.logger.info(" Health check passed")
514+
return True
515+
except Exception as e:
516+
self.logger.error(f" Health check failed: {e}")
517+
return False
518+
628519
def get_status(self) -> Dict[str, Any]:
629520
tree_sitter_status = {}
630521
if self.tree_sitter:
@@ -648,23 +539,6 @@ def get_status(self) -> Dict[str, Any]:
648539
]
649540
}
650541

651-
async def health_check(self) -> bool:
652-
try:
653-
messages = [UserMessage(role="user", content="Health check - please respond with 'OK'")]
654-
generator = self.client.agents.turn.create(
655-
agent_id=self.agent_id,
656-
session_id=self.session_id,
657-
messages=messages,
658-
stream=True,
659-
)
660-
for chunk in generator:
661-
break
662-
self.logger.info(" Health check passed")
663-
return True
664-
except Exception as e:
665-
self.logger.error(f" Health check failed: {e}")
666-
return False
667-
668542
def get_tree_sitter_status(self) -> Dict[str, Any]:
669543
if not self.tree_sitter:
670544
return {

0 commit comments

Comments
 (0)