-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcontext_aware_prompt.py
More file actions
217 lines (181 loc) · 8.32 KB
/
context_aware_prompt.py
File metadata and controls
217 lines (181 loc) · 8.32 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
"""
Context-aware prompt tools for MCP integration
"""
import logging
import re
from collections import Counter
from typing import Any, Dict, List, Optional

from fastmcp import FastMCP
logger = logging.getLogger(__name__)
def register_context_aware_tools(mcp: FastMCP):
    """
    Register context-aware prompt tools with the MCP server.

    Registers three async tools on the given server:
    ``generate_contextual_prompt``, ``extract_context_keywords`` and
    ``optimize_prompt_for_context``.

    Args:
        mcp: FastMCP server instance to register the tools on.
    """

    @mcp.tool()
    async def generate_contextual_prompt(
        query: str,
        context: Optional[Dict[str, Any]] = None,
        prompt_type: str = "search"
    ) -> Dict[str, Any]:
        """
        Generate a context-aware prompt based on the query and context.

        Args:
            query: User query or request.
            context: Optional context information (dict or any stringifiable value).
            prompt_type: Type of prompt to generate (search, analysis,
                explanation, comparison, recommendation). Unknown types fall
                back to "search".

        Returns:
            Dictionary containing the generated prompt and metadata
            (prompt, prompt_type, has_context, context_length, query_length),
            or an error-fallback dict with an "error" key.
        """
        try:
            logger.info(f"Generating contextual prompt for query: {query[:100]}...")

            # Base prompt templates; {query}/{context} are our own placeholders.
            templates = {
                "search": "Based on the following context, help me understand: {query}\n\nContext: {context}",
                "analysis": "Analyze the following information in context: {query}\n\nContext: {context}",
                "explanation": "Explain this concept with relevant context: {query}\n\nContext: {context}",
                "comparison": "Compare and contrast the following: {query}\n\nContext: {context}",
                "recommendation": "Provide recommendations based on: {query}\n\nContext: {context}"
            }

            # Unknown prompt types fall back to the generic "search" template.
            template = templates.get(prompt_type, templates["search"])

            # Flatten the context into a printable "key: value" block.
            context_str = ""
            if context:
                if isinstance(context, dict):
                    context_str = "\n".join(
                        f"{key}: {value}" for key, value in context.items()
                    )
                else:
                    context_str = str(context)

            # Substitute placeholders in a single pass with re.sub instead of
            # str.format: format() raises KeyError/ValueError when the
            # user-supplied query or context contains literal braces (e.g.
            # code snippets or JSON), and a single pass also prevents one
            # substituted value from being re-expanded inside the other.
            substitutions = {"query": query, "context": context_str}
            prompt = re.sub(
                r"\{(query|context)\}",
                lambda m: substitutions[m.group(1)],
                template,
            )

            # Nudge the model to actually use substantial context when present.
            if context and len(str(context)) > 100:
                prompt += "\n\nNote: This query has substantial context available. Consider all relevant information when responding."

            return {
                "prompt": prompt,
                "prompt_type": prompt_type,
                "has_context": bool(context),
                "context_length": len(str(context)) if context else 0,
                "query_length": len(query)
            }
        except Exception as e:
            logger.error(f"Error generating contextual prompt: {e}")
            # Degrade gracefully: hand back a minimal usable prompt.
            return {
                "error": str(e),
                "prompt": f"Help me with: {query}",
                "prompt_type": prompt_type,
                "has_context": False
            }

    @mcp.tool()
    async def extract_context_keywords(
        text: str,
        max_keywords: int = 10
    ) -> Dict[str, Any]:
        """
        Extract relevant keywords from text for context understanding.

        Uses a simple frequency-based heuristic (lowercase, strip punctuation,
        drop stop words and words of <= 2 chars); a real implementation might
        use an NLP library instead.

        Args:
            text: Text to analyze.
            max_keywords: Maximum number of keywords to extract.

        Returns:
            Dictionary with "keywords" (most frequent first), "count",
            "text_length" and "unique_words", or an error-fallback dict.
        """
        try:
            logger.info(f"Extracting keywords from text ({len(text)} chars)")

            # Common English stop words to exclude from the keyword list.
            stop_words = {
                'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for',
                'of', 'with', 'by', 'is', 'are', 'was', 'were', 'be', 'been', 'have',
                'has', 'had', 'do', 'does', 'did', 'will', 'would', 'could', 'should',
                'this', 'that', 'these', 'those', 'i', 'you', 'he', 'she', 'it', 'we', 'they'
            }
            # Punctuation trimmed from both ends of each token.
            punctuation = '.,!?;:()[]{}"\''

            # Normalize each token once (the original stripped punctuation four
            # times per word) and keep meaningful words only.
            filtered_words = []
            for raw in text.lower().split():
                word = raw.strip(punctuation)
                if word and word not in stop_words and len(word) > 2:
                    filtered_words.append(word)

            # Counter.most_common sorts by descending count with a stable
            # order among ties, matching the original sorted(..., reverse=True).
            counts = Counter(filtered_words)
            keywords = [word for word, _ in counts.most_common(max_keywords)]

            return {
                "keywords": keywords,
                "count": len(keywords),
                "text_length": len(text),
                "unique_words": len(counts)
            }
        except Exception as e:
            logger.error(f"Error extracting keywords: {e}")
            return {
                "error": str(e),
                "keywords": [],
                "count": 0
            }

    @mcp.tool()
    async def optimize_prompt_for_context(
        prompt: str,
        context_type: str = "general",
        response_format: str = "detailed"
    ) -> Dict[str, Any]:
        """
        Optimize a prompt based on context type and desired response format.

        Args:
            prompt: Original prompt to optimize.
            context_type: Type of context (code, documentation, analysis,
                troubleshooting, learning, general). Unknown types fall back
                to "general".
            response_format: Desired response format (detailed, concise,
                technical, practical, educational). Unknown formats get a
                generic instruction.

        Returns:
            Dictionary containing the optimized prompt and metadata, or an
            error-fallback dict echoing the original prompt unchanged.
        """
        try:
            logger.info(f"Optimizing prompt for context: {context_type}")

            # Context-specific guidance appended to the prompt.
            context_instructions = {
                "code": "Provide clear, executable code examples with comments. Include any imports or setup needed.",
                "documentation": "Structure the response with clear headings, bullet points, and practical examples.",
                "analysis": "Provide a thorough analysis with supporting evidence and clear reasoning.",
                "troubleshooting": "Include step-by-step diagnostic procedures and potential solutions.",
                "learning": "Explain concepts progressively with analogies and practical applications.",
                "general": "Provide a comprehensive yet accessible response with relevant examples."
            }

            # Response-style guidance appended to the prompt.
            format_instructions = {
                "detailed": "Provide comprehensive coverage with in-depth explanations and examples.",
                "concise": "Be brief and to the point while maintaining accuracy and clarity.",
                "technical": "Use precise terminology and include technical specifications when relevant.",
                "practical": "Focus on actionable advice and real-world applications.",
                "educational": "Structure the response for learning with clear explanations and examples."
            }

            context_instruction = context_instructions.get(context_type, context_instructions["general"])
            format_instruction = format_instructions.get(response_format, "Provide a clear and helpful response.")

            # Explicit concatenation so the output layout does not depend on
            # source-file indentation (the original used a triple-quoted
            # f-string spanning multiple lines).
            optimized_prompt = (
                f"{prompt}\n"
                f"Context Guidelines: {context_instruction}\n"
                f"Response Style: {format_instruction}"
            )

            return {
                "optimized_prompt": optimized_prompt,
                "original_prompt": prompt,
                "context_type": context_type,
                "response_format": response_format,
                "context_instruction": context_instruction,
                "format_instruction": format_instruction
            }
        except Exception as e:
            logger.error(f"Error optimizing prompt: {e}")
            # Fall back to the unmodified prompt on any failure.
            return {
                "error": str(e),
                "optimized_prompt": prompt,
                "original_prompt": prompt,
                "context_type": context_type,
                "response_format": response_format
            }

    logger.info("Registered context-aware prompt tools")