Skip to content

Commit edbc6ed

Browse files
committed
Introduce a transforming interceptor, onLLMPromptTransforming, to the EventHandler feature that can modify the prompt before it is sent to an LLM
1 parent 5348e14 commit edbc6ed

File tree

12 files changed

+709
-10
lines changed

12 files changed

+709
-10
lines changed

agents/agents-core/src/commonMain/kotlin/ai/koog/agents/core/feature/ContextualPromptExecutor.kt

Lines changed: 23 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -38,17 +38,36 @@ public class ContextualPromptExecutor(
3838
@OptIn(ExperimentalUuidApi::class)
3939
val eventId = Uuid.random().toString()
4040

41-
logger.debug { "Executing LLM call (event id: $eventId, prompt: $prompt, tools: [${tools.joinToString { it.name }}])" }
42-
context.pipeline.onLLMCallStarting(eventId, context.executionInfo, context.runId, prompt, model, tools, context)
41+
logger.debug { "Transforming prompt (event id: $eventId, prompt: $prompt, tools: [${tools.joinToString { it.name }}])" }
42+
val transformedPrompt = context.pipeline.onLLMPromptTransforming(
43+
eventId,
44+
context.executionInfo,
45+
context.runId,
46+
prompt,
47+
model,
48+
tools,
49+
context
50+
)
51+
52+
logger.debug { "Executing LLM call (event id: $eventId, prompt: $transformedPrompt, tools: [${tools.joinToString { it.name }}])" }
53+
context.pipeline.onLLMCallStarting(
54+
eventId,
55+
context.executionInfo,
56+
context.runId,
57+
transformedPrompt,
58+
model,
59+
tools,
60+
context
61+
)
4362

44-
val responses = executor.execute(prompt, model, tools)
63+
val responses = executor.execute(transformedPrompt, model, tools)
4564

4665
logger.trace { "Finished LLM call (event id: $eventId) with responses: [${responses.joinToString { "${it.role}: ${it.content}" }}]" }
4766
context.pipeline.onLLMCallCompleted(
4867
eventId,
4968
context.executionInfo,
5069
context.runId,
51-
prompt,
70+
transformedPrompt,
5271
model,
5372
tools,
5473
responses,

agents/agents-core/src/commonMain/kotlin/ai/koog/agents/core/feature/handler/AgentLifecycleEventType.kt

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,14 @@ public sealed interface AgentLifecycleEventType {
9292
//region LLM
9393

9494
/**
95-
* Represents an event triggered when an error occurs during a language model call.
95+
* Represents an event triggered when a prompt is being transformed.
96+
* This allows features to modify the prompt before [LLMCallStarting] is triggered
97+
* and before the prompt is sent to the language model.
98+
*/
99+
public object LLMPromptTransforming : AgentLifecycleEventType
100+
101+
/**
102+
* Represents an event triggered before a call is made to the language model.
96103
*/
97104
public object LLMCallStarting : AgentLifecycleEventType
98105

agents/agents-core/src/commonMain/kotlin/ai/koog/agents/core/feature/handler/llm/LLMCallEventContext.kt

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,34 @@ import ai.koog.prompt.message.Message
1515
*/
1616
public interface LLMCallEventContext : AgentLifecycleEventContext
1717

18+
/**
19+
* Represents the context for transforming a prompt before it is sent to the language model.
20+
* This context is used by features that need to modify the prompt, such as adding context from
21+
* a database, implementing RAG (Retrieval-Augmented Generation), or applying prompt templates.
22+
*
23+
* Prompt transformation occurs before [LLMCallStartingContext] is triggered, allowing
24+
* modifications to be applied prior to the LLM call event handlers.
25+
*
26+
* @property executionInfo The execution information containing parentId and current execution path.
27+
* @property runId The unique identifier for this LLM call session.
28+
* @property prompt The prompt that will be transformed. This is the current state of the prompt
29+
* after any previous transformations.
30+
* @property model The language model instance that will be used for the call.
31+
* @property tools The list of tool descriptors available for the LLM call.
32+
* @property context The AI agent context providing access to agent state and configuration.
33+
*/
34+
public data class LLMPromptTransformingContext(
35+
override val eventId: String,
36+
override val executionInfo: AgentExecutionInfo,
37+
val runId: String,
38+
val prompt: Prompt,
39+
val model: LLModel,
40+
val tools: List<ToolDescriptor>,
41+
val context: AIAgentContext
42+
) : LLMCallEventContext {
43+
override val eventType: AgentLifecycleEventType = AgentLifecycleEventType.LLMPromptTransforming
44+
}
45+
1846
/**
1947
* Represents the context for handling a before LLM call event.
2048
*

agents/agents-core/src/commonMain/kotlin/ai/koog/agents/core/feature/handler/llm/LLMCallEventHandler.kt

Lines changed: 82 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,31 @@
11
package ai.koog.agents.core.feature.handler.llm
22

3+
import ai.koog.prompt.dsl.Prompt
4+
35
/**
46
* A handler responsible for managing the execution flow of a Large Language Model (LLM) call.
5-
* It allows customization of logic to be executed before and after the LLM is called.
7+
* It allows customization of logic to be executed before and after the LLM is called,
8+
* as well as transformation of the prompt before it is sent to the model.
69
*/
710
public class LLMCallEventHandler {
811

12+
/**
13+
* A transformer that can modify the prompt before it is sent to the language model.
14+
*
15+
* This transformer enables features to implement patterns like:
16+
* - RAG (Retrieval-Augmented Generation): Query a database and add relevant context to the prompt
17+
* - Prompt templates: Apply standardized formatting or instructions
18+
* - Context injection: Add user-specific or session-specific information
19+
* - Content filtering: Modify or sanitize the prompt before sending
20+
*
21+
* Multiple transformers can be chained together.
22+
* Each transformer receives the prompt from the previous one and returns a modified version.
23+
*
24+
* By default, the transformer returns the prompt unchanged.
25+
*/
26+
public var llmPromptTransformingHandler: LLMPromptTransformingHandler =
27+
LLMPromptTransformingHandler { _, prompt -> prompt }
28+
929
/**
1030
* A handler that is invoked before making a call to the Language Learning Model (LLM).
1131
*
@@ -29,6 +49,20 @@ public class LLMCallEventHandler {
2949
*/
3050
public var llmCallCompletedHandler: LLMCallCompletedHandler =
3151
LLMCallCompletedHandler { _ -> }
52+
53+
/**
54+
* Transforms the provided prompt using the configured prompt transformer.
55+
*
56+
* This transformation occurs before [LLMCallStartingHandler] is invoked.
57+
*
58+
* @param context The context containing information about the prompt transformation
59+
* @param prompt The prompt to be transformed
60+
* @return The transformed prompt
61+
*/
62+
public suspend fun transformRequest(
63+
context: LLMPromptTransformingContext,
64+
prompt: Prompt
65+
): Prompt = llmPromptTransformingHandler.transform(context, prompt)
3266
}
3367

3468
/**
@@ -62,3 +96,50 @@ public fun interface LLMCallCompletedHandler {
6296
*/
6397
public suspend fun handle(eventContext: LLMCallCompletedContext)
6498
}
99+
100+
/**
101+
* A functional interface for transforming prompts before they are sent to the language model.
102+
*
103+
* This handler is invoked before [LLMCallStartingHandler], allowing prompt modification
104+
* prior to the LLM call event handlers being triggered.
105+
*
106+
* This handler enables features to implement patterns such as:
107+
* - RAG (Retrieval-Augmented Generation): Query a vector database and add relevant context
108+
* - Prompt augmentation: Add system instructions, user context, or conversation history
109+
* - Content filtering: Sanitize or modify prompts before sending
110+
* - Logging and auditing: Record prompts for compliance or debugging
111+
*
112+
* Multiple transformers can be registered and will be applied in sequence (chain pattern).
113+
* Each transformer receives the prompt from the previous one and returns a modified version.
114+
*
115+
* Example usage:
116+
* ```kotlin
117+
* LLMPromptTransformingHandler { context, prompt ->
118+
* // Query database for relevant context
119+
* val relevantDocs = database.search(prompt.messages.last().content)
120+
*
121+
* // Augment the prompt with retrieved context
122+
* prompt.copy(
123+
* messages = listOf(
124+
* Message.System("Context: ${relevantDocs.joinToString()}"),
125+
* *prompt.messages.toTypedArray()
126+
* )
127+
* )
128+
* }
129+
* ```
130+
*/
131+
public fun interface LLMPromptTransformingHandler {
132+
/**
133+
* Transforms the provided prompt based on the given context.
134+
*
135+
* @param context The context containing information about the LLM request, including
136+
* the run ID, model, available tools, and agent context.
137+
* @param prompt The current prompt to be transformed.
138+
* @return The transformed prompt that will be sent to the language model
139+
* (or passed to the next transformer in the chain).
140+
*/
141+
public suspend fun transform(
142+
context: LLMPromptTransformingContext,
143+
prompt: Prompt
144+
): Prompt
145+
}

agents/agents-core/src/commonMain/kotlin/ai/koog/agents/core/feature/pipeline/AIAgentPipeline.kt

Lines changed: 67 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ import ai.koog.agents.core.feature.handler.agent.AgentExecutionFailedContext
2121
import ai.koog.agents.core.feature.handler.agent.AgentStartingContext
2222
import ai.koog.agents.core.feature.handler.llm.LLMCallCompletedContext
2323
import ai.koog.agents.core.feature.handler.llm.LLMCallStartingContext
24+
import ai.koog.agents.core.feature.handler.llm.LLMPromptTransformingContext
2425
import ai.koog.agents.core.feature.handler.strategy.StrategyCompletedContext
2526
import ai.koog.agents.core.feature.handler.strategy.StrategyStartingContext
2627
import ai.koog.agents.core.feature.handler.streaming.LLMStreamingCompletedContext
@@ -245,6 +246,32 @@ public expect abstract class AIAgentPipeline(agentConfig: AIAgentConfig, clock:
245246

246247
//region Trigger LLM Call Handlers
247248

249+
/**
250+
* Transforms the prompt by applying all registered transformers.
251+
*
252+
* This method is called before [onLLMCallStarting] and allows features to modify
253+
* the prompt before it is sent to the language model. Multiple transformers can be
254+
* registered and will be applied in sequence (chain pattern).
255+
*
256+
* @param eventId Unique identifier for this event
257+
* @param executionInfo The execution information containing parentId and current execution path
258+
* @param runId The unique identifier for this LLM call session
259+
* @param prompt The original prompt to be transformed
260+
* @param model The language model that will be used
261+
* @param tools The list of tool descriptors available for the LLM call
262+
* @param context The AI agent context
263+
* @return The transformed prompt that will be sent to the language model
264+
*/
265+
public override suspend fun onLLMPromptTransforming(
266+
eventId: String,
267+
executionInfo: AgentExecutionInfo,
268+
runId: String,
269+
prompt: Prompt,
270+
model: LLModel,
271+
tools: List<ToolDescriptor>,
272+
context: AIAgentContext
273+
): Prompt
274+
248275
/**
249276
* Notifies all registered LLM handlers before a language model call is made.
250277
*
@@ -622,6 +649,46 @@ public expect abstract class AIAgentPipeline(agentConfig: AIAgentConfig, clock:
622649
handle: suspend (StrategyCompletedContext) -> Unit
623650
)
624651

652+
/**
653+
* Registers a transformer that can modify the prompt before it is sent to the language model.
654+
*
655+
* This transformer is invoked before [interceptLLMCallStarting], allowing prompt modification
656+
* prior to the LLM call event handlers being triggered.
657+
*
658+
* This interceptor enables features to implement patterns such as:
659+
* - RAG (Retrieval-Augmented Generation): Query a vector database and add relevant context
660+
* - Prompt augmentation: Add system instructions, user context, or conversation history
661+
* - Content filtering: Sanitize or modify prompts before sending
662+
* - Logging and auditing: Record prompts for compliance or debugging
663+
*
664+
* Multiple transformers can be registered and will be applied in sequence (chain pattern).
665+
* Each transformer receives the prompt from the previous one and returns a modified version.
666+
*
667+
* @param feature The feature registering this transformer
668+
* @param transform A function that takes the transforming context and current prompt,
669+
* and returns the transformed prompt
670+
*
671+
* Example:
672+
* ```
673+
* pipeline.interceptLLMPromptTransforming(feature) { prompt ->
674+
* // Query database for relevant context
675+
* val relevantDocs = database.search(prompt.messages.last().content)
676+
*
677+
* // Return augmented prompt
678+
* prompt.copy(
679+
* messages = listOf(
680+
* Message.System("Context: ${relevantDocs.joinToString()}"),
681+
* *prompt.messages.toTypedArray()
682+
* )
683+
* )
684+
* }
685+
* ```
686+
*/
687+
public override fun interceptLLMPromptTransforming(
688+
feature: AIAgentFeature<*, *>,
689+
transform: suspend LLMPromptTransformingContext.(Prompt) -> Prompt
690+
)
691+
625692
/**
626693
* Intercepts LLM calls before they are made to modify or log the prompt.
627694
*

agents/agents-core/src/commonMain/kotlin/ai/koog/agents/core/feature/pipeline/AIAgentPipelineAPI.kt

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ import ai.koog.agents.core.feature.handler.agent.AgentExecutionFailedContext
2121
import ai.koog.agents.core.feature.handler.agent.AgentStartingContext
2222
import ai.koog.agents.core.feature.handler.llm.LLMCallCompletedContext
2323
import ai.koog.agents.core.feature.handler.llm.LLMCallStartingContext
24+
import ai.koog.agents.core.feature.handler.llm.LLMPromptTransformingContext
2425
import ai.koog.agents.core.feature.handler.strategy.StrategyCompletedContext
2526
import ai.koog.agents.core.feature.handler.strategy.StrategyStartingContext
2627
import ai.koog.agents.core.feature.handler.streaming.LLMStreamingCompletedContext
@@ -126,6 +127,16 @@ public interface AIAgentPipelineAPI {
126127
//endregion
127128

128129
//region Trigger LLM Handlers
130+
public suspend fun onLLMPromptTransforming(
131+
eventId: String,
132+
executionInfo: AgentExecutionInfo,
133+
runId: String,
134+
prompt: Prompt,
135+
model: LLModel,
136+
tools: List<ToolDescriptor>,
137+
context: AIAgentContext
138+
): Prompt
139+
129140
public suspend fun onLLMCallStarting(
130141
eventId: String,
131142
executionInfo: AgentExecutionInfo,
@@ -274,6 +285,11 @@ public interface AIAgentPipelineAPI {
274285
handle: suspend (StrategyCompletedContext) -> Unit
275286
)
276287

288+
public fun interceptLLMPromptTransforming(
289+
feature: AIAgentFeature<*, *>,
290+
transform: suspend LLMPromptTransformingContext.(Prompt) -> Prompt
291+
)
292+
277293
public fun interceptLLMCallStarting(
278294
feature: AIAgentFeature<*, *>,
279295
handle: suspend (eventContext: LLMCallStartingContext) -> Unit

agents/agents-core/src/commonMain/kotlin/ai/koog/agents/core/feature/pipeline/AIAgentPipelineImpl.kt

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,8 @@ import ai.koog.agents.core.feature.handler.llm.LLMCallCompletedHandler
3434
import ai.koog.agents.core.feature.handler.llm.LLMCallEventHandler
3535
import ai.koog.agents.core.feature.handler.llm.LLMCallStartingContext
3636
import ai.koog.agents.core.feature.handler.llm.LLMCallStartingHandler
37+
import ai.koog.agents.core.feature.handler.llm.LLMPromptTransformingContext
38+
import ai.koog.agents.core.feature.handler.llm.LLMPromptTransformingHandler
3739
import ai.koog.agents.core.feature.handler.strategy.StrategyCompletedContext
3840
import ai.koog.agents.core.feature.handler.strategy.StrategyCompletedHandler
3941
import ai.koog.agents.core.feature.handler.strategy.StrategyEventHandler
@@ -277,6 +279,21 @@ public class AIAgentPipelineImpl(
277279

278280
//region Trigger LLM Call Handlers
279281

282+
public override suspend fun onLLMPromptTransforming(
283+
eventId: String,
284+
executionInfo: AgentExecutionInfo,
285+
runId: String,
286+
prompt: Prompt,
287+
model: LLModel,
288+
tools: List<ToolDescriptor>,
289+
context: AIAgentContext
290+
): Prompt {
291+
val eventContext = LLMPromptTransformingContext(eventId, executionInfo, runId, prompt, model, tools, context)
292+
return llmCallEventHandlers.values.fold(prompt) { currentPrompt, handler ->
293+
handler.transformRequest(eventContext.copy(prompt = currentPrompt), currentPrompt)
294+
}
295+
}
296+
280297
public override suspend fun onLLMCallStarting(
281298
eventId: String,
282299
executionInfo: AgentExecutionInfo,
@@ -567,6 +584,17 @@ public class AIAgentPipelineImpl(
567584
)
568585
}
569586

587+
public override fun interceptLLMPromptTransforming(
588+
feature: AIAgentFeature<*, *>,
589+
transform: suspend LLMPromptTransformingContext.(Prompt) -> Prompt
590+
) {
591+
val handler = llmCallEventHandlers.getOrPut(feature.key) { LLMCallEventHandler() }
592+
593+
handler.llmPromptTransformingHandler = LLMPromptTransformingHandler(
594+
function = createConditionalTransformHandler(feature, transform)
595+
)
596+
}
597+
570598
public override fun interceptLLMCallStarting(
571599
feature: AIAgentFeature<*, *>,
572600
handle: suspend (eventContext: LLMCallStartingContext) -> Unit
@@ -996,6 +1024,21 @@ public class AIAgentPipelineImpl(
9961024
eventContext.handle(env)
9971025
}
9981026

1027+
@InternalAgentsApi
1028+
public fun createConditionalTransformHandler(
1029+
feature: AIAgentFeature<*, *>,
1030+
handle: suspend LLMPromptTransformingContext.(Prompt) -> Prompt
1031+
): suspend (LLMPromptTransformingContext, Prompt) -> Prompt =
1032+
handler@{ eventContext, prompt ->
1033+
val featureConfig = registeredFeatures[feature.key]?.featureConfig
1034+
1035+
if (featureConfig != null && !featureConfig.isAccepted(eventContext)) {
1036+
return@handler prompt
1037+
}
1038+
1039+
eventContext.handle(prompt)
1040+
}
1041+
9991042
public override fun FeatureConfig.isAccepted(eventContext: AgentLifecycleEventContext): Boolean {
10001043
return this.eventFilter.invoke(eventContext)
10011044
}

0 commit comments

Comments (0)