Skip to content
This repository was archived by the owner on Oct 21, 2025. It is now read-only.

Commit d3b89a0

Browse files
committed
Add findings. Support multi-turn questions.
1 parent 1d319f1 commit d3b89a0

File tree

6 files changed

+280
-33
lines changed

6 files changed

+280
-33
lines changed

findings/export_20250822_170025.json

Lines changed: 57 additions & 0 deletions
Large diffs are not rendered by default.

findings/export_20250822_171024.json

Lines changed: 63 additions & 0 deletions
Large diffs are not rendered by default.

findings/quesma.findings.4.json

Lines changed: 39 additions & 0 deletions
Large diffs are not rendered by default.

src/ui/cli_findings.py

Lines changed: 80 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -85,6 +85,82 @@ def _get_single_char(self, prompt_text: str = "Command: ") -> str:
8585
else:
8686
return input(f"\n{prompt_text}").lower()
8787

88+
def _display_conversation_rich(self, finding_data: dict) -> None:
    """Render a finding's conversation with Rich panels.

    Supports both formats present in findings data:
    multi-turn findings carry parallel ``conversation_turns`` /
    ``all_responses`` lists; legacy single-turn findings carry
    ``prompt`` / ``response`` keys.

    Args:
        finding_data: One finding record loaded from a findings JSON file.
    """
    conversation_turns = finding_data.get("conversation_turns")
    all_responses = finding_data.get("all_responses")

    # A non-empty list is already truthy, so no explicit len() check is
    # needed to pick the multi-turn path.
    if conversation_turns and all_responses:
        # Multi-turn conversation: show each user turn with its response.
        for i, turn_prompt in enumerate(conversation_turns):
            turn_num = i + 1

            # User prompt for this turn.
            self.console.print(Panel(
                turn_prompt,
                title=f"Turn {turn_num} - User Prompt",
                style="cyan"
            ))

            # Assistant response for this turn, if one was recorded.
            # The lists may be ragged (fewer responses than prompts).
            if i < len(all_responses):
                response_content = all_responses[i]
                self.console.print(Panel(
                    response_content,
                    title=f"Turn {turn_num} - Model Response",
                    style="green"
                ))
            else:
                self.console.print(Panel(
                    "[dim]No response recorded for this turn[/dim]",
                    title=f"Turn {turn_num} - Model Response",
                    style="red"
                ))
    else:
        # Single-turn conversation: original prompt/response display.
        if finding_data.get("prompt"):
            prompt_text = finding_data["prompt"]
            self.console.print(Panel(prompt_text, title="Prompt", style="yellow"))

        if finding_data.get("response"):
            response_text = finding_data["response"]
            self.console.print(Panel(response_text, title="Response", style="blue"))
129+
def _display_conversation_text(self, finding_data: dict) -> None:
130+
"""Display conversation using plain text formatting - handles both single-turn and multi-turn"""
131+
conversation_turns = finding_data.get("conversation_turns")
132+
all_responses = finding_data.get("all_responses")
133+
134+
if conversation_turns and all_responses and len(conversation_turns) > 0:
135+
# Multi-turn conversation: show each turn with response
136+
print("\nMulti-Turn Conversation:")
137+
print("=" * 60)
138+
139+
for i, turn_prompt in enumerate(conversation_turns):
140+
turn_num = i + 1
141+
142+
# Show user prompt for this turn
143+
print(f"\n--- Turn {turn_num} - User Prompt ---")
144+
print(turn_prompt)
145+
146+
# Show assistant response for this turn (if available)
147+
if i < len(all_responses):
148+
response_content = all_responses[i]
149+
print(f"\n--- Turn {turn_num} - Model Response ---")
150+
print(response_content)
151+
else:
152+
print(f"\n--- Turn {turn_num} - Model Response ---")
153+
print("(No response recorded for this turn)")
154+
else:
155+
# Single-turn conversation: use original display logic
156+
if finding_data.get("prompt"):
157+
prompt_text = finding_data["prompt"]
158+
print(f"\nPrompt:\n{prompt_text}")
159+
160+
if finding_data.get("response"):
161+
response_text = finding_data["response"]
162+
print(f"\nResponse:\n{response_text}")
163+
88164
def view_findings(self) -> None:
89165
"""Browse and view findings folder"""
90166
findings_dir = Path("findings")
@@ -331,15 +407,8 @@ def _navigate_grouped_finding(self, filepath: Path) -> None:
331407

332408
self.console.print(info_table)
333409

334-
# Show prompt
335-
if current_finding.get("prompt"):
336-
prompt_text = current_finding["prompt"]
337-
self.console.print(Panel(prompt_text, title="Prompt", style="yellow"))
338-
339-
# Show response
340-
if current_finding.get("response"):
341-
response_text = current_finding["response"]
342-
self.console.print(Panel(response_text, title="Response", style="blue"))
410+
# Show conversation (either multi-turn or single-turn)
411+
self._display_conversation_rich(current_finding)
343412

344413
# Show commands
345414
self.console.print("\n[yellow]Commands:[/yellow]")
@@ -382,13 +451,8 @@ def _navigate_grouped_finding(self, filepath: Path) -> None:
382451
if current_finding.get("notes"):
383452
print(f"Notes: {current_finding['notes']}")
384453

385-
if current_finding.get("prompt"):
386-
prompt_text = current_finding["prompt"]
387-
print(f"\nPrompt:\n{prompt_text}")
388-
389-
if current_finding.get("response"):
390-
response_text = current_finding["response"]
391-
print(f"\nResponse:\n{response_text}")
454+
# Show conversation (either multi-turn or single-turn)
455+
self._display_conversation_text(current_finding)
392456

393457
print("-" * 80)
394458
print("Commands: ", end="")

src/ui/cli_ui.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -862,6 +862,7 @@ def _export_findings(self) -> None:
862862
"test_name": result.test_name,
863863
"category": result.category, # Preserve original category string
864864
"prompt": result.get_display_prompt(),
865+
"conversation_turns": result.conversation_turns,
865866
"response": result.response,
866867
"vulnerability_detected": result.vulnerability_detected,
867868
"confidence": result.confidence,

src/utils/competition_converter.py

Lines changed: 40 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ def convert_finding_to_competition(
5656
return self._build_competition_json(finding_data, harmony_text, metadata)
5757

5858
def _build_harmony_conversation(self, finding_data: dict[str, Any]) -> str:
59-
"""Build Harmony conversation from finding data"""
59+
"""Build Harmony conversation from finding data - supports both single-turn and multi-turn"""
6060
messages = []
6161

6262
# 1. Always add the constant system prompt (required for competition)
@@ -70,24 +70,47 @@ def _build_harmony_conversation(self, finding_data: dict[str, Any]) -> str:
7070
Message.from_role_and_content(Role.DEVELOPER, f"# Instructions\n\n{system_prompt}")
7171
)
7272

73-
# 3. Add user prompt
74-
messages.append(
75-
Message.from_role_and_content(
76-
Role.USER, finding_data.get("prompt", "No prompt available")
73+
# 3. Check if this is a multi-turn conversation
74+
conversation_turns = finding_data.get("conversation_turns")
75+
all_responses = finding_data.get("all_responses")
76+
77+
if conversation_turns and all_responses and len(conversation_turns) > 0:
78+
# Multi-turn conversation: add alternating USER/ASSISTANT messages
79+
for i, turn_prompt in enumerate(conversation_turns):
80+
# Add user prompt for this turn
81+
messages.append(Message.from_role_and_content(Role.USER, turn_prompt))
82+
83+
# Add assistant response for this turn (if available)
84+
if i < len(all_responses):
85+
response_content = all_responses[i]
86+
# Include thinking for this turn if available
87+
all_thinking = finding_data.get("all_thinking")
88+
if all_thinking and i < len(all_thinking) and all_thinking[i].strip():
89+
# For now, just use the response content
90+
# TODO: Consider how to properly integrate thinking into channels
91+
messages.append(Message.from_role_and_content(Role.ASSISTANT, response_content))
92+
else:
93+
messages.append(Message.from_role_and_content(Role.ASSISTANT, response_content))
94+
else:
95+
# Single-turn conversation: use original logic
96+
# 3. Add user prompt
97+
messages.append(
98+
Message.from_role_and_content(
99+
Role.USER, finding_data.get("prompt", "No prompt available")
100+
)
77101
)
78-
)
79102

80-
# 4. Add assistant response
81-
# Check if we have thinking data that should be included
82-
response_content = finding_data.get("response", "No response available")
83-
thinking = finding_data.get("thinking", "")
84-
85-
if thinking and thinking.strip():
86-
# If we have thinking, we might want to include it in channels
87-
# For now, just use the main response
88-
messages.append(Message.from_role_and_content(Role.ASSISTANT, response_content))
89-
else:
90-
messages.append(Message.from_role_and_content(Role.ASSISTANT, response_content))
103+
# 4. Add assistant response
104+
# Check if we have thinking data that should be included
105+
response_content = finding_data.get("response", "No response available")
106+
thinking = finding_data.get("thinking", "")
107+
108+
if thinking and thinking.strip():
109+
# If we have thinking, we might want to include it in channels
110+
# For now, just use the main response
111+
messages.append(Message.from_role_and_content(Role.ASSISTANT, response_content))
112+
else:
113+
messages.append(Message.from_role_and_content(Role.ASSISTANT, response_content))
91114

92115
# Convert to Harmony format string
93116
try:

0 commit comments

Comments
 (0)