
Commit ce9d463

Fix finishReason nullability (JetBrains#771)
Fixes JetBrains#758
1 parent 5394f6a commit ce9d463

15 files changed (+661 −77 lines)

gradle.properties

Lines changed: 0 additions & 1 deletion
@@ -1,7 +1,6 @@
 #Kotlin
 kotlin.code.style=official
 kotlin.daemon.jvmargs=-Xmx4096M
-kotlin.native.ignoreDisabledTargets=true
 
 # Build JS targets using npm package manager https://kotlinlang.org/docs/js-project-setup.html#npm-dependencies
 kotlin.js.yarn=false

integration-tests/src/jvmTest/kotlin/ai/koog/integration/tests/executor/SingleLLMPromptExecutorIntegrationTest.kt

Lines changed: 8 additions & 24 deletions
@@ -238,10 +238,6 @@ class SingleLLMPromptExecutorIntegrationTest {
         if (model.id == OpenAIModels.Audio.GPT4oAudio.id || model.id == OpenAIModels.Audio.GPT4oMiniAudio.id) {
             assumeTrue(false, "https://github.com/JetBrains/koog/issues/231")
         }
-        // TODO fix (KG-394): OpenRouter anthropic/claude-sonnet-4 streaming is incompatible with our current client setup (SSE/protocol)
-        if (model.provider == LLMProvider.OpenRouter && model.id.contains("anthropic/claude-sonnet-4")) {
-            assumeTrue(false, "Skipping OpenRouter anthropic/claude-sonnet-4 streaming: protocol incompatibility")
-        }
 
         val executor = SingleLLMPromptExecutor(client)
 
@@ -514,10 +510,6 @@ class SingleLLMPromptExecutorIntegrationTest {
         if (model.id == OpenAIModels.Audio.GPT4oAudio.id || model.id == OpenAIModels.Audio.GPT4oMiniAudio.id) {
             assumeTrue(false, "https://github.com/JetBrains/koog/issues/231")
         }
-        // TODO fix (KG-394): OpenRouter anthropic/claude-sonnet-4 streaming is incompatible with our current client setup (SSE/protocol)
-        if (model.provider == LLMProvider.OpenRouter && model.id.contains("anthropic/claude-sonnet-4")) {
-            assumeTrue(false, "Skipping OpenRouter anthropic/claude-sonnet-4 streaming: protocol incompatibility")
-        }
 
         val prompt = Prompt.build("test-streaming") {
             system("You are a helpful assistant. You have NO output length limitations.")
@@ -550,10 +542,6 @@ class SingleLLMPromptExecutorIntegrationTest {
     fun integration_testStructuredDataStreaming(model: LLModel, client: LLMClient) = runTest(timeout = 300.seconds) {
         Models.assumeAvailable(model.provider)
         assumeTrue(model != OpenAIModels.CostOptimized.GPT4_1Nano, "Model $model is too small for structured streaming")
-        // TODO fix (KG-394): OpenRouter anthropic/claude-sonnet-4 streaming is incompatible with our current client setup (SSE/protocol)
-        if (model.provider == LLMProvider.OpenRouter && model.id.contains("anthropic/claude-sonnet-4")) {
-            assumeTrue(false, "Skipping OpenRouter anthropic/claude-sonnet-4 streaming: protocol incompatibility")
-        }
 
         val countries = mutableListOf<Country>()
         val countryDefinition = markdownCountryDefinition()
@@ -641,7 +629,7 @@ class SingleLLMPromptExecutorIntegrationTest {
     @MethodSource("modelClientCombinations")
     fun integration_testToolChoiceNamed(model: LLModel, client: LLMClient) = runTest(timeout = 300.seconds) {
         Models.assumeAvailable(model.provider)
-        assumeTrue(!(model.provider == LLMProvider.OpenRouter && model.id.contains("anthropic")), "KG-282")
+
         assumeTrue(model.capabilities.contains(LLMCapability.ToolChoice), "Model $model does not support tools")
 
         val calculatorTool = createCalculatorTool()
@@ -1142,10 +1130,7 @@ class SingleLLMPromptExecutorIntegrationTest {
             model.capabilities.contains(LLMCapability.Schema.JSON.Standard),
             "Model does not support Standard JSON Schema"
         )
-        // TODO fix (KG-394): OpenRouter anthropic/claude-sonnet-4 streaming is incompatible with our current client setup (SSE/protocol)
-        if (model.provider == LLMProvider.OpenRouter) {
-            assumeTrue(false, "Skipping StructuredOutputNative for OpenRouter due to schema incompatibilities upstream")
-        }
+
         val executor = SingleLLMPromptExecutor(client)
 
         withRetry {
@@ -1167,13 +1152,7 @@ class SingleLLMPromptExecutorIntegrationTest {
             model.capabilities.contains(LLMCapability.Schema.JSON.Standard),
             "Model does not support Standard JSON Schema"
         )
-        // TODO fix (KG-394) OpenRouter
-        if (model.provider == LLMProvider.OpenRouter) {
-            assumeTrue(
-                false,
-                "Skipping StructuredOutputNativeWithFixingParser for OpenRouter due to upstream schema incompatibilities"
-            )
-        }
+
         val executor = SingleLLMPromptExecutor(client)
 
         withRetry {
@@ -1195,6 +1174,11 @@ class SingleLLMPromptExecutorIntegrationTest {
             model.provider !== LLMProvider.Google,
             "Google models fail to return manually requested structured output without fixing"
         )
+        assumeTrue(
+            model.provider == LLMProvider.OpenRouter && model.id.contains("gemini"),
+            "Google models fail to return manually requested structured output without fixing"
+        )
+
         val executor = SingleLLMPromptExecutor(client)
 
         withRetry {
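
Aside: every guard removed or added above uses JUnit 5 assumptions, where a false condition aborts the test as "skipped" rather than "failed". A minimal sketch of that mechanism outside Koog (the environment-variable check is an illustrative assumption, not what these tests actually gate on):

    import org.junit.jupiter.api.Assumptions.assumeTrue
    import org.junit.jupiter.api.Test

    class AssumptionSketchTest {
        // Hypothetical precondition standing in for Models.assumeAvailable
        // and the capability checks in the tests above.
        private val apiKeyPresent = System.getenv("OPEN_ROUTER_API_KEY") != null

        @Test
        fun skipsInsteadOfFailing() {
            // A false condition throws TestAbortedException, so the runner
            // reports the test as skipped, not failed.
            assumeTrue(apiKeyPresent, "OPEN_ROUTER_API_KEY not set; skipping")
            // The test body runs only when the assumption holds.
        }
    }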

integration-tests/src/jvmTest/kotlin/ai/koog/integration/tests/utils/Models.kt

Lines changed: 1 addition & 2 deletions
@@ -84,8 +84,7 @@ object Models {
         OpenRouterModels.GPT5Nano,
         OpenRouterModels.DeepSeekV30324,
         OpenRouterModels.Claude4Sonnet,
-        // ToDo add Gemini when KG-203 is fixed
-        // OpenRouterModels.Gemini2_5FlashLite,
+        OpenRouterModels.Gemini2_5FlashLite,
     )
 
     @JvmStatic

koog-ktor/src/commonMain/kotlin/ai/koog/ktor/utils/LLMModelParser.kt

Lines changed: 5 additions & 1 deletion
@@ -237,8 +237,12 @@ private val GOOGLE_MODELS_MAP = mapOf(
 )
 
 private val OPENROUTER_MODELS_MAP = mapOf(
-    "claude3sonnet" to OpenRouterModels.Claude3Sonnet,
     "claude3haiku" to OpenRouterModels.Claude3Haiku,
+    "claude3opus" to OpenRouterModels.Claude3Opus,
+    "claude3sonnet" to OpenRouterModels.Claude3Sonnet,
+    "claude35sonnet" to OpenRouterModels.Claude3_5Sonnet,
+    "claude4sonnet" to OpenRouterModels.Claude4Sonnet,
+    "claude41opus" to OpenRouterModels.Claude4_1Opus,
     "gpt4" to OpenRouterModels.GPT4,
    "gpt4o" to OpenRouterModels.GPT4o,
    "gpt5" to OpenRouterModels.GPT5,

prompt/prompt-executor/prompt-executor-clients/prompt-executor-deepseek-client/build.gradle.kts

Lines changed: 2 additions & 0 deletions
@@ -13,6 +13,8 @@ kotlin {
     commonMain {
         dependencies {
             api(project(":prompt:prompt-executor:prompt-executor-clients:prompt-executor-openai-client-base"))
+            api(project(":prompt:prompt-structure"))
+            api(project(":prompt:prompt-executor:prompt-executor-clients:prompt-executor-openai-client"))
             implementation(libs.oshai.kotlin.logging)
         }
     }

prompt/prompt-executor/prompt-executor-clients/prompt-executor-deepseek-client/src/commonMain/kotlin/ai/koog/prompt/executor/clients/deepseek/DeepSeekLLMClient.kt

Lines changed: 19 additions & 1 deletion
@@ -13,13 +13,20 @@ import ai.koog.prompt.executor.clients.openai.base.OpenAIBasedSettings
 import ai.koog.prompt.executor.clients.openai.base.models.OpenAIMessage
 import ai.koog.prompt.executor.clients.openai.base.models.OpenAITool
 import ai.koog.prompt.executor.clients.openai.base.models.OpenAIToolChoice
+import ai.koog.prompt.executor.clients.openai.structure.OpenAIBasicJsonSchemaGenerator
+import ai.koog.prompt.executor.clients.openai.structure.OpenAIStandardJsonSchemaGenerator
 import ai.koog.prompt.executor.model.LLMChoice
+import ai.koog.prompt.llm.LLMProvider
 import ai.koog.prompt.llm.LLModel
 import ai.koog.prompt.params.LLMParams
 import ai.koog.prompt.streaming.StreamFrameFlowBuilder
+import ai.koog.prompt.structure.RegisteredBasicJsonSchemaGenerators
+import ai.koog.prompt.structure.RegisteredStandardJsonSchemaGenerators
+import ai.koog.prompt.structure.annotations.InternalStructuredOutputApi
 import io.github.oshai.kotlinlogging.KotlinLogging
 import io.ktor.client.HttpClient
 import kotlinx.datetime.Clock
+import kotlin.collections.set
 
 /**
  * Configuration settings for connecting to the DeepSeek API.
@@ -54,8 +61,14 @@ public class DeepSeekLLMClient(
     staticLogger
 ) {
 
+    @OptIn(InternalStructuredOutputApi::class)
     private companion object {
         private val staticLogger = KotlinLogging.logger { }
+
+        init {
+            RegisteredBasicJsonSchemaGenerators[LLMProvider.DeepSeek] = OpenAIBasicJsonSchemaGenerator
+            RegisteredStandardJsonSchemaGenerators[LLMProvider.DeepSeek] = OpenAIStandardJsonSchemaGenerator
+        }
     }
 
     override fun serializeProviderChatRequest(
@@ -92,7 +105,12 @@ public class DeepSeekLLMClient(
 
     override fun processProviderChatResponse(response: DeepSeekChatCompletionResponse): List<LLMChoice> {
         require(response.choices.isNotEmpty()) { "Empty choices in response" }
-        return response.choices.map { it.toMessageResponses(createMetaInfo(response.usage)) }
+        return response.choices.map {
+            it.message.toMessageResponses(
+                it.finishReason,
+                createMetaInfo(response.usage),
+            )
+        }
     }
 
     override fun decodeStreamingResponse(data: String): DeepSeekChatCompletionStreamResponse =
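
The companion-object `init` block above registers DeepSeek's JSON-schema generators as a side effect of class initialization. A minimal sketch of that provider-keyed registry pattern, with hypothetical names (the real `Registered*JsonSchemaGenerators` maps live in Koog's prompt-structure module):

    // Hypothetical registry mirroring the shape of
    // RegisteredStandardJsonSchemaGenerators; all names are illustrative.
    fun interface SchemaGenerator {
        fun generate(description: String): String
    }

    object SchemaRegistry {
        private val generators = mutableMapOf<String, SchemaGenerator>()
        operator fun set(provider: String, generator: SchemaGenerator) {
            generators[provider] = generator
        }
        operator fun get(provider: String): SchemaGenerator? = generators[provider]
    }

    class SketchClient {
        private companion object {
            init {
                // Runs once, when the enclosing class is first initialized -
                // the same trick the DeepSeek client uses above.
                SchemaRegistry["deepseek"] = SchemaGenerator { "{\"description\":\"$it\"}" }
            }
        }
    }

One caveat of this pattern: registration only happens once the client class is loaded, so code that queries the registry before touching the client will not see the entry.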

prompt/prompt-executor/prompt-executor-clients/prompt-executor-openai-client-base/src/commonMain/kotlin/ai/koog/prompt/executor/clients/openai/base/AbstractOpenAILLMClient.kt

Lines changed: 8 additions & 9 deletions
@@ -12,7 +12,6 @@ import ai.koog.prompt.executor.clients.openai.base.models.Content
 import ai.koog.prompt.executor.clients.openai.base.models.JsonSchemaObject
 import ai.koog.prompt.executor.clients.openai.base.models.OpenAIBaseLLMResponse
 import ai.koog.prompt.executor.clients.openai.base.models.OpenAIBaseLLMStreamResponse
-import ai.koog.prompt.executor.clients.openai.base.models.OpenAIChoice
 import ai.koog.prompt.executor.clients.openai.base.models.OpenAIContentPart
 import ai.koog.prompt.executor.clients.openai.base.models.OpenAIFunction
 import ai.koog.prompt.executor.clients.openai.base.models.OpenAIMessage
@@ -406,10 +405,10 @@ public abstract class AbstractOpenAILLMClient<TResponse : OpenAIBaseLLMResponse,
     }
 
     @OptIn(ExperimentalEncodingApi::class)
-    protected fun OpenAIChoice.toMessageResponses(metaInfo: ResponseMetaInfo): List<Message.Response> {
+    protected fun OpenAIMessage.toMessageResponses(finishReason: String?, metaInfo: ResponseMetaInfo): List<Message.Response> {
         return when {
-            message is OpenAIMessage.Assistant && !message.toolCalls.isNullOrEmpty() -> {
-                message.toolCalls.map { toolCall ->
+            this is OpenAIMessage.Assistant && !this.toolCalls.isNullOrEmpty() -> {
+                this.toolCalls.map { toolCall ->
                     Message.Tool.Call(
                         id = toolCall.id,
                         tool = toolCall.function.name,
@@ -419,20 +418,20 @@
                 }
             }
 
-            message.content != null -> listOf(
+            this.content != null -> listOf(
                 Message.Assistant(
-                    content = message.content!!.text(),
+                    content = this.content!!.text(),
                     finishReason = finishReason,
                     metaInfo = metaInfo
                 )
             )
 
-            message is OpenAIMessage.Assistant && message.audio?.data != null -> listOf(
+            this is OpenAIMessage.Assistant && this.audio?.data != null -> listOf(
                 Message.Assistant(
-                    content = message.audio.transcript.orEmpty(),
+                    content = this.audio.transcript.orEmpty(),
                     attachments = listOf(
                         Attachment.Audio(
-                            content = AttachmentContent.Binary.Base64(message.audio.data),
+                            content = AttachmentContent.Binary.Base64(this.audio.data),
                             format = "unknown", // FIXME: clarify format from response
                         )
                     ),
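
This receiver change is the core of the commit: `toMessageResponses` now hangs off `OpenAIMessage` and takes `finishReason` explicitly as a nullable `String?`, so (per the commit title) a missing finish reason stays representable end to end. A simplified sketch of the call-site shape, using stand-in types rather than Koog's real classes:

    // Stand-ins for OpenAIMessage / OpenAIChoice; illustrative only.
    data class SketchMessage(val content: String?)
    data class SketchChoice(val message: SketchMessage, val finishReason: String?)

    // New shape: the message is the receiver and finishReason is passed
    // explicitly as String?, so a null value survives the conversion.
    fun SketchMessage.toMessageResponses(finishReason: String?): List<String> =
        listOf("${content.orEmpty()} [finishReason=$finishReason]")

    fun process(choices: List<SketchChoice>): List<String> =
        choices.flatMap { it.message.toMessageResponses(it.finishReason) }

The `DeepSeekLLMClient` change above and the `OpenAILLMClient` change below migrate their `processProviderChatResponse` overrides to exactly this `it.message.toMessageResponses(it.finishReason, ...)` shape.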

prompt/prompt-executor/prompt-executor-clients/prompt-executor-openai-client-base/src/commonMain/kotlin/ai/koog/prompt/executor/clients/openai/base/models/OpenAIDataModels.kt

Lines changed: 34 additions & 0 deletions
@@ -352,6 +352,8 @@ public class OpenAIStreamFunction(
  * [coral][OpenAIAudioVoice.Coral], [echo][OpenAIAudioVoice.Echo], [fable][OpenAIAudioVoice.Fable],
  * [nova][OpenAIAudioVoice.Nova], [onyx][OpenAIAudioVoice.Onyx], [sage][OpenAIAudioVoice.Sage]
  * and [shimmer][OpenAIAudioVoice.Shimmer]
+ *
+ * See [audio](https://platform.openai.com/docs/api-reference/chat/create#chat-create-audio)
  */
 @Serializable
 public class OpenAIAudioConfig(
@@ -364,6 +366,8 @@
  *
  * This enum is used to specify the format of the audio output. It contains several standard audio formats
  * which are widely compatible with various audio players and systems.
+ *
+ * See [audio/format](https://platform.openai.com/docs/api-reference/chat/create#chat-create-audio-format)
  */
 @Serializable
 public enum class OpenAIAudioFormat {
@@ -387,6 +391,8 @@
  * Represents the available voice options for audio output in OpenAI's system.
  *
  * This enum defines a list of predefined voices that can be used to synthesize audio responses.
+ *
+ * See [audio/voice](https://platform.openai.com/docs/api-reference/chat/create#chat-create-audio-voice)
  */
 @Serializable
 public enum class OpenAIAudioVoice {
@@ -458,6 +464,8 @@ public class OpenAIStaticContent(public val content: Content) {
  * If not set, the model/provider default applies.
  *
  * Serialized as `"minimal" | "low" | "medium" | "high"`.
+ *
+ * See [reasoning_effort](https://platform.openai.com/docs/api-reference/chat/create#chat-create-reasoning_effort)
  */
 @Serializable
 public enum class ReasoningEffort {
@@ -549,6 +557,8 @@ public sealed interface OpenAIResponseFormat {
  * Note: When a tier is requested, the response payload includes the
  * `service_tier` actually used to serve the request. This value may differ from
  * the one provided in the request.
+ *
+ * See [service_tier](https://platform.openai.com/docs/api-reference/chat/create#chat-create-service_tier)
  */
 public enum class ServiceTier {
     /**
@@ -613,6 +623,8 @@ public class JsonSchemaObject(
  * All other chunks will also include a `usage` field, but with a null value.
  * NOTE: If the stream is interrupted,
  * you may not receive the final usage chunk which contains the total token usage for the request.
+ *
+ * See [stream_options](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stream_options)
  */
 @Serializable
 public class OpenAIStreamOptions(public val includeUsage: Boolean? = null)
@@ -764,6 +776,8 @@ public class OpenAIUserLocation(
  * @property index The index of the choice in the list of choices.
  * @property logprobs Log probability information for the choice.
  * @property message A chat completion message generated by the model.
+ *
+ * See [choices](https://platform.openai.com/docs/api-reference/chat/object#chat/object-choices)
  */
 @Serializable
 public class OpenAIChoice(
@@ -776,6 +790,8 @@
 /**
  * @property content A list of message content tokens with log probability information.
  * @property refusal A list of message refusal tokens with log probability information.
+ *
+ * See [choices/logprobs](https://platform.openai.com/docs/api-reference/chat/object#chat/object-choices-logprobs)
  */
 @Serializable
 public class OpenAIChoiceLogProbs(
@@ -793,6 +809,9 @@
  * @property token The token.
  * @property topLogprobs List of the most likely tokens and their log probability, at this token position.
  * In rare cases, there may be fewer than the number of requested `[topLogprobs]` returned.
+ *
+ * See [choices/logprobs/content](https://platform.openai.com/docs/api-reference/chat/object#chat/object-choices-logprobs-content)
+ * and [choices/logprobs/refusal](https://platform.openai.com/docs/api-reference/chat/object#chat/object-choices-logprobs-refusal)
  */
 @Serializable
 public class ContentLogProbs(
@@ -810,6 +829,9 @@
  * @property logprob The log probability of this token, if it is within the top 20 most likely tokens.
  * Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.
  * @property token The token.
+ *
+ * See [choices/logprobs/content/top_logprobs](https://platform.openai.com/docs/api-reference/chat/object#chat/object-choices-logprobs-content-top_logprobs)
+ * and [choices/logprobs/refusal/top_logprobs](https://platform.openai.com/docs/api-reference/chat/object#chat/object-choices-logprobs-refusal-top_logprobs)
  */
 @Serializable
 public class ContentTopLogProbs(
@@ -849,6 +871,9 @@ public class OpenAIWebUrlCitation(public val urlCitation: Citation) {
  * @property totalTokens Total number of tokens used in the request (prompt + completion).
  * @property completionTokensDetails Breakdown of tokens used in a completion.
  * @property promptTokensDetails Breakdown of tokens used in the prompt.
+ *
+ * See [chat completions usage](https://platform.openai.com/docs/api-reference/chat/object#chat/object-usage)
+ * and [streaming usage](https://platform.openai.com/docs/api-reference/chat-streaming/streaming#chat-streaming/streaming-usage)
  */
 @Serializable
 public class OpenAIUsage(
@@ -868,6 +893,9 @@
  * the number of tokens in the prediction that did not appear in the completion.
  * However, like reasoning tokens, these tokens are still counted in the total completion tokens for purposes of billing,
  * output and context window limits.
+ *
+ * See [chat completions usage/completion_tokens_details](https://platform.openai.com/docs/api-reference/chat/object#chat/object-usage-completion_tokens_details)
+ * and [streaming usage/completion_tokens_details](https://platform.openai.com/docs/api-reference/chat-streaming/streaming#chat-streaming/streaming-usage-completion_tokens_details)
  */
 @Serializable
 public class CompletionTokensDetails(
@@ -880,6 +908,9 @@
 /**
  * @property audioTokens Audio input tokens generated by the model.
  * @property cachedTokens Cached tokens present in the prompt.
+ *
+ * See [chat completions usage/prompt_tokens_details](https://platform.openai.com/docs/api-reference/chat/object#chat/object-usage-prompt_tokens_details)
+ * and [streaming usage/prompt_tokens_details](https://platform.openai.com/docs/api-reference/chat-streaming/streaming#chat-streaming/streaming-usage-prompt_tokens_details)
  */
 @Serializable
 public class PromptTokensDetails(
@@ -897,6 +928,7 @@
  * @property index The index of the choice in the list of choices.
  * @property logprobs Log probability information for the choice.
  *
+ * See [choices](https://platform.openai.com/docs/api-reference/chat-streaming/streaming#chat-streaming/streaming-choices)
  */
 @Serializable
 public class OpenAIStreamChoice(
@@ -911,6 +943,8 @@
  * @property refusal The refusal message generated by the model.
  * @property role The role of the author of this message.
  * @property toolCalls
+ *
+ * See [choices/delta](https://platform.openai.com/docs/api-reference/chat-streaming/streaming#chat-streaming/streaming-choices-delta)
  */
 @Serializable
 public class OpenAIStreamDelta(
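
For context on why finishReason must be nullable: in OpenAI's chat-completions streaming schema (linked throughout the doc comments above), each chunk's choice carries `finish_reason: null` until the final chunk. A minimal kotlinx.serialization sketch with a simplified type, not Koog's actual class:

    import kotlinx.serialization.SerialName
    import kotlinx.serialization.Serializable
    import kotlinx.serialization.json.Json

    @Serializable
    data class StreamChoiceSketch(
        val index: Int,
        // Nullable: every streamed chunk except the last reports null here.
        @SerialName("finish_reason") val finishReason: String? = null,
    )

    fun main() {
        val json = Json { ignoreUnknownKeys = true }
        val mid = json.decodeFromString<StreamChoiceSketch>("""{"index":0,"finish_reason":null}""")
        val last = json.decodeFromString<StreamChoiceSketch>("""{"index":0,"finish_reason":"stop"}""")
        println(mid.finishReason)  // null
        println(last.finishReason) // stop
    }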

prompt/prompt-executor/prompt-executor-clients/prompt-executor-openai-client/src/commonMain/kotlin/ai/koog/prompt/executor/clients/openai/OpenAILLMClient.kt

Lines changed: 6 additions & 1 deletion
@@ -223,7 +223,12 @@ public open class OpenAILLMClient(
 
     override fun processProviderChatResponse(response: OpenAIChatCompletionResponse): List<LLMChoice> {
         require(response.choices.isNotEmpty()) { "Empty choices in response" }
-        return response.choices.map { it.toMessageResponses(createMetaInfo(response.usage)) }
+        return response.choices.map {
+            it.message.toMessageResponses(
+                it.finishReason,
+                createMetaInfo(response.usage),
+            )
+        }
     }
 
     override fun decodeStreamingResponse(data: String): OpenAIChatCompletionStreamResponse =
