Skip to content

Commit 8a70e9a

Browse files
authored
Merge pull request #910 from crestalnetwork/hyacinthus
feat: system prompt now supports search and super
2 parents 3f4fccd + 4d9b41c commit 8a70e9a

File tree

4 files changed

+45
-40
lines changed

4 files changed

+45
-40
lines changed

CHANGELOG.md

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,13 @@
1+
## v0.8.29 - 2025-11-13
2+
3+
### Bug Fixes
4+
- Fixed engine.py with latest changes
5+
6+
### Documentation
7+
- Updated changelog
8+
9+
**Full Changelog**: https://github.com/crestalnetwork/intentkit/compare/v0.8.28...v0.8.29
10+
111
## v0.8.28 - 2025-11-13
212

313
### Changes

intentkit/core/engine.py

Lines changed: 6 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -167,7 +167,7 @@ async def select_model(
167167
) -> BaseChatModel:
168168
llm_params = {}
169169
context = runtime.context
170-
if context.search:
170+
if context.search or agent.has_search():
171171
if llm_model.info.supports_search:
172172
if llm_model.info.provider == LLMProvider.OPENAI:
173173
tools.append({"type": "web_search"})
@@ -454,30 +454,18 @@ async def stream_agent_raw(
454454

455455
# super mode
456456
recursion_limit = 30
457-
if re.search(r"@super\b", input_message) or user_message.super_mode:
457+
if (
458+
re.search(r"@super\b", input_message)
459+
or user_message.super_mode
460+
or agent.has_super()
461+
):
458462
recursion_limit = 300
459-
# Remove @super from the message
460463
input_message = re.sub(r"@super\b", "", input_message).strip()
461464

462465
# llm native search
463466
search = user_message.search_mode if user_message.search_mode is not None else False
464467
if re.search(r"@search\b", input_message) or re.search(r"@web\b", input_message):
465468
search = True
466-
if model.supports_search:
467-
input_message = re.sub(
468-
r"@search\b",
469-
"(You have native search tool, you can use it to get more recent information)",
470-
input_message,
471-
).strip()
472-
input_message = re.sub(
473-
r"@web\b",
474-
"(You have native search tool, you can use it to get more recent information)",
475-
input_message,
476-
).strip()
477-
else:
478-
search = False
479-
input_message = re.sub(r"@search\b", "", input_message).strip()
480-
input_message = re.sub(r"@web\b", "", input_message).strip()
481469

482470
# content to llm
483471
messages = [

intentkit/core/prompt.py

Lines changed: 3 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -240,35 +240,19 @@ def agent_prompt(agent: Agent, agent_data: AgentData) -> str:
240240

241241

242242
async def explain_prompt(message: str) -> str:
243-
"""
244-
Process message to replace @skill:*:* patterns with (call skill xxxxx) format.
245-
246-
Args:
247-
message (str): The input message to process
248-
249-
Returns:
250-
str: The processed message with @skill patterns replaced
251-
"""
252-
# Pattern to match @skill:category:config_name with word boundaries
253243
pattern = r"@skill:([^:]+):([^\s]+)\b"
254244

255245
async def replace_skill_pattern(match):
256246
category = match.group(1)
257247
config_name = match.group(2)
258248

259-
# Get skill by category and config_name
260249
skill = await Skill.get_by_config_name(category, config_name)
261-
262250
if skill:
263251
return f"(call skill {skill.name})"
264252
else:
265-
# If skill not found, keep original pattern
266253
return match.group(0)
267254

268-
# Find all matches
269255
matches = list(re.finditer(pattern, message))
270-
271-
# Process matches in reverse order to maintain string positions
272256
result = message
273257
for match in reversed(matches):
274258
replacement = await replace_skill_pattern(match)
@@ -362,9 +346,6 @@ async def build_entrypoint_prompt(agent: Agent, context: AgentContext) -> str |
362346
elif entrypoint == AuthorType.TRIGGER.value:
363347
entrypoint_prompt = "\n\n" + _build_autonomous_task_prompt(agent, context)
364348

365-
if entrypoint_prompt:
366-
entrypoint_prompt = await explain_prompt(entrypoint_prompt)
367-
368349
return entrypoint_prompt
369350

370351

@@ -422,16 +403,17 @@ async def get_base_prompt():
422403
async def formatted_prompt(
423404
state: AgentState, runtime: Runtime[AgentContext]
424405
) -> list[BaseMessage]:
425-
# Get base prompt (with potential admin LLM skill control processing)
406+
# Base prompt
426407
final_system_prompt = await get_base_prompt()
427408

428409
context = runtime.context
429410

430411
# Add entrypoint prompt if applicable
431412
entrypoint_prompt = await build_entrypoint_prompt(agent, context)
432413
if entrypoint_prompt:
414+
processed_entrypoint = await explain_prompt(entrypoint_prompt)
433415
final_system_prompt = (
434-
f"{final_system_prompt}## Entrypoint rules{entrypoint_prompt}\n\n"
416+
f"{final_system_prompt}## Entrypoint rules{processed_entrypoint}\n\n"
435417
)
436418

437419
# Add user info if user_id is a valid EVM wallet address
@@ -444,7 +426,6 @@ async def formatted_prompt(
444426
final_system_prompt = f"{final_system_prompt}{internal_info}"
445427

446428
if agent.prompt_append:
447-
# Find the system message in prompt_array and process it
448429
for i, (role, content) in enumerate(prompt_array):
449430
if role == "system":
450431
processed_append = await explain_prompt(content)

intentkit/models/agent.py

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1197,6 +1197,32 @@ async def is_model_support_image(self) -> bool:
11971197
except Exception:
11981198
return False
11991199

1200+
def has_search(self) -> bool:
1201+
texts = [
1202+
self.prompt,
1203+
self.prompt_append,
1204+
self.purpose,
1205+
self.personality,
1206+
self.principles,
1207+
]
1208+
for t in texts:
1209+
if t and (re.search(r"@search\b", t) or re.search(r"@web\b", t)):
1210+
return True
1211+
return False
1212+
1213+
def has_super(self) -> bool:
1214+
texts = [
1215+
self.prompt,
1216+
self.prompt_append,
1217+
self.purpose,
1218+
self.personality,
1219+
self.principles,
1220+
]
1221+
for t in texts:
1222+
if t and re.search(r"@super\b", t):
1223+
return True
1224+
return False
1225+
12001226
def to_yaml(self) -> str:
12011227
"""
12021228
Dump the agent model to YAML format with field descriptions as comments.

0 commit comments

Comments
 (0)