Skip to content

Commit 37d5f6c

Browse files
kah-sengJunyi-994ndrelimwjiayisJunyi Hou
authored
feat: BYOK (#161)
## Summary Adds BYOK features (including base URL, API key and param configurations), also added changes as per suggestions from #157. ## Tested Providers Stable: GPT, Claude, Gemini, MiniMax, OpenRouter Unstable: DeepSeek, GLM ## Screenshots <img width="483" height="78" alt="image" src="https://github.com/user-attachments/assets/406ed93e-7514-417c-8013-199ca8da306c" /> <br> <img width="457" height="743" alt="image" src="https://github.com/user-attachments/assets/7c172284-6280-4f89-99df-6e551b79586f" /> <br> <img width="497" height="321" alt="image" src="https://github.com/user-attachments/assets/34c322ef-2ffc-4dc9-881f-f220a4582ff1" /> <br> Closes #118 Closes #149 Closes #157 --------- Co-authored-by: Junyi <hji200914@gmail.com> Co-authored-by: 4ndrelim <tiltedaf3@gmail.com> Co-authored-by: Wang Jiayi <95198512+wjiayis@users.noreply.github.com> Co-authored-by: andre <95348273+4ndrelim@users.noreply.github.com> Co-authored-by: Junyi Hou <junyi@xtras3.tail08d22c.ts.net> Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
1 parent 9ecf372 commit 37d5f6c

24 files changed

Lines changed: 1338 additions & 241 deletions

File tree

internal/api/chat/create_conversation_message_stream_v2.go

Lines changed: 45 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2,12 +2,14 @@ package chat
22

33
import (
44
"context"
5+
"fmt"
56
"paperdebugger/internal/api/mapper"
67
"paperdebugger/internal/libs/contextutil"
78
"paperdebugger/internal/libs/shared"
89
"paperdebugger/internal/models"
910
"paperdebugger/internal/services"
1011
chatv2 "paperdebugger/pkg/gen/api/chat/v2"
12+
"strings"
1113

1214
"github.com/google/uuid"
1315
"github.com/openai/openai-go/v3"
@@ -276,12 +278,50 @@ func (s *ChatServerV2) CreateConversationMessageStream(
276278
return s.sendStreamError(stream, err)
277279
}
278280

279-
// Usage is the same as ChatCompletion, just passing the stream parameter
280-
llmProvider := &models.LLMProviderConfig{
281-
APIKey: settings.OpenAIAPIKey,
281+
// Check if user has an API key for requested model
282+
var llmProvider *models.LLMProviderConfig
283+
var customModel *models.CustomModel
284+
customModel = nil
285+
286+
customModelID := req.GetCustomModelId()
287+
if customModelID != "" {
288+
for i := range settings.CustomModels {
289+
if settings.CustomModels[i].Id.Hex() == customModelID {
290+
customModel = &settings.CustomModels[i]
291+
break
292+
}
293+
}
294+
if customModel == nil {
295+
return s.sendStreamError(stream, fmt.Errorf("custom model not found: %q", customModelID))
296+
}
297+
modelSlug = customModel.Slug
298+
}
299+
300+
if customModel == nil {
301+
// User did not specify API key for this model
302+
llmProvider = &models.LLMProviderConfig{
303+
APIKey: "",
304+
IsCustomModel: false,
305+
}
306+
} else {
307+
customModel.BaseUrl = strings.ToLower(customModel.BaseUrl)
308+
309+
if strings.Contains(customModel.BaseUrl, "paperdebugger.com") {
310+
customModel.BaseUrl = ""
311+
}
312+
if !strings.HasPrefix(customModel.BaseUrl, "https://") {
313+
customModel.BaseUrl = strings.Replace(customModel.BaseUrl, "http://", "", 1)
314+
customModel.BaseUrl = "https://" + customModel.BaseUrl
315+
}
316+
317+
llmProvider = &models.LLMProviderConfig{
318+
APIKey: customModel.APIKey,
319+
Endpoint: customModel.BaseUrl,
320+
IsCustomModel: true,
321+
}
282322
}
283323

284-
openaiChatHistory, inappChatHistory, err := s.aiClientV2.ChatCompletionStreamV2(ctx, stream, conversation.ID.Hex(), modelSlug, conversation.OpenaiChatHistoryCompletion, llmProvider)
324+
openaiChatHistory, inappChatHistory, err := s.aiClientV2.ChatCompletionStreamV2(ctx, stream, conversation.ID.Hex(), modelSlug, conversation.OpenaiChatHistoryCompletion, llmProvider, customModel)
285325
if err != nil {
286326
return s.sendStreamError(stream, err)
287327
}
@@ -307,7 +347,7 @@ func (s *ChatServerV2) CreateConversationMessageStream(
307347
for i, bsonMsg := range conversation.InappChatHistory {
308348
protoMessages[i] = mapper.BSONToChatMessageV2(bsonMsg)
309349
}
310-
title, err := s.aiClientV2.GetConversationTitleV2(ctx, protoMessages, llmProvider)
350+
title, err := s.aiClientV2.GetConversationTitleV2(ctx, protoMessages, llmProvider, modelSlug, customModel)
311351
if err != nil {
312352
s.logger.Error("Failed to get conversation title", "error", err, "conversationID", conversation.ID.Hex())
313353
return

internal/api/chat/list_supported_models_v2.go

Lines changed: 18 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@ package chat
22

33
import (
44
"context"
5-
"strings"
65

76
"paperdebugger/internal/libs/contextutil"
87
chatv2 "paperdebugger/pkg/gen/api/chat/v2"
@@ -220,32 +219,35 @@ func (s *ChatServerV2) ListSupportedModels(
220219
return nil, err
221220
}
222221

223-
hasOwnAPIKey := strings.TrimSpace(settings.OpenAIAPIKey) != ""
224-
225222
var models []*chatv2.SupportedModel
226-
for _, config := range allModels {
227-
// Choose the appropriate slug based on whether user has their own API key.
228-
//
229-
// Some models are only available via OpenRouter; for those, slugOpenAI may be empty.
230-
// In that case, keep using the OpenRouter slug to avoid returning an empty model slug.
231-
slug := config.slugOpenRouter
232-
if hasOwnAPIKey && strings.TrimSpace(config.slugOpenAI) != "" {
233-
slug = config.slugOpenAI
234-
}
235223

224+
for _, model := range settings.CustomModels {
225+
modelID := model.Id.Hex()
226+
models = append(models, &chatv2.SupportedModel{
227+
Id: &modelID,
228+
Name: model.Name,
229+
Slug: model.Slug,
230+
TotalContext: int64(model.ContextWindow),
231+
MaxOutput: int64(model.MaxOutput),
232+
InputPrice: int64(model.InputPrice),
233+
OutputPrice: int64(model.OutputPrice),
234+
IsCustom: true,
235+
})
236+
}
237+
238+
for _, config := range allModels {
236239
model := &chatv2.SupportedModel{
237240
Name: config.name,
238-
Slug: slug,
241+
Slug: config.slugOpenRouter,
239242
TotalContext: config.totalContext,
240243
MaxOutput: config.maxOutput,
241244
InputPrice: config.inputPrice,
242245
OutputPrice: config.outputPrice,
243246
}
244247

245248
// If model requires own key but user hasn't provided one, mark as disabled
246-
if config.requireOwnKey && !hasOwnAPIKey {
247-
model.Disabled = true
248-
model.DisabledReason = stringPtr("Requires your own OpenAI API key. Configure it in Settings.")
249+
if config.requireOwnKey {
250+
continue
249251
}
250252

251253
models = append(models, model)

internal/api/mapper/user.go

Lines changed: 51 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,26 +3,75 @@ package mapper
33
import (
44
"paperdebugger/internal/models"
55
userv1 "paperdebugger/pkg/gen/api/user/v1"
6+
7+
"go.mongodb.org/mongo-driver/v2/bson"
68
)
79

810
func MapProtoSettingsToModel(settings *userv1.Settings) *models.Settings {
11+
// Map the slice of custom models
12+
customModels := make([]models.CustomModel, len(settings.CustomModels))
13+
for i, m := range settings.CustomModels {
14+
var id bson.ObjectID
15+
16+
id, err := bson.ObjectIDFromHex(m.Id)
17+
if err != nil {
18+
id = bson.NewObjectID()
19+
}
20+
21+
customModels[i] = models.CustomModel{
22+
Id: id,
23+
Slug: m.Slug,
24+
Name: m.Name,
25+
BaseUrl: m.BaseUrl,
26+
APIKey: m.ApiKey,
27+
ContextWindow: m.ContextWindow,
28+
MaxOutput: m.MaxOutput,
29+
InputPrice: m.InputPrice,
30+
OutputPrice: m.OutputPrice,
31+
Temperature: m.Temperature,
32+
ParallelToolCalls: m.ParallelToolCalls,
33+
Store: m.Store,
34+
}
35+
}
36+
937
return &models.Settings{
1038
ShowShortcutsAfterSelection: settings.ShowShortcutsAfterSelection,
1139
FullWidthPaperDebuggerButton: settings.FullWidthPaperDebuggerButton,
12-
EnableCitationSuggestion: settings.EnableCitationSuggestion,
40+
EnableCitationSuggestion: settings.EnableCitationSuggestion,
1341
FullDocumentRag: settings.FullDocumentRag,
1442
ShowedOnboarding: settings.ShowedOnboarding,
1543
OpenAIAPIKey: settings.OpenaiApiKey,
44+
CustomModels: customModels,
1645
}
1746
}
1847

1948
func MapModelSettingsToProto(settings *models.Settings) *userv1.Settings {
49+
// Map the slice back to Proto
50+
customModels := make([]*userv1.CustomModel, len(settings.CustomModels))
51+
for i, m := range settings.CustomModels {
52+
customModels[i] = &userv1.CustomModel{
53+
Id: m.Id.Hex(),
54+
Slug: m.Slug,
55+
Name: m.Name,
56+
BaseUrl: m.BaseUrl,
57+
ApiKey: m.APIKey,
58+
ContextWindow: m.ContextWindow,
59+
MaxOutput: m.MaxOutput,
60+
InputPrice: m.InputPrice,
61+
OutputPrice: m.OutputPrice,
62+
Temperature: m.Temperature,
63+
ParallelToolCalls: m.ParallelToolCalls,
64+
Store: m.Store,
65+
}
66+
}
67+
2068
return &userv1.Settings{
2169
ShowShortcutsAfterSelection: settings.ShowShortcutsAfterSelection,
2270
FullWidthPaperDebuggerButton: settings.FullWidthPaperDebuggerButton,
23-
EnableCitationSuggestion: settings.EnableCitationSuggestion,
71+
EnableCitationSuggestion: settings.EnableCitationSuggestion,
2472
FullDocumentRag: settings.FullDocumentRag,
2573
ShowedOnboarding: settings.ShowedOnboarding,
2674
OpenaiApiKey: settings.OpenAIAPIKey,
75+
CustomModels: customModels,
2776
}
2877
}

internal/models/llm_provider.go

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2,10 +2,13 @@ package models
22

33
// LLMProviderConfig holds the configuration for LLM API calls.
// If both Endpoint and APIKey are empty, the system default will be used.
// If IsCustomModel is true, the user-requested slug with its corresponding
// API key and endpoint should be used instead of the defaults.
type LLMProviderConfig struct {
	Endpoint      string // provider base URL; empty means "use the system default"
	APIKey        string // key sent to the provider; empty means "use the server's key"
	ModelName     string
	IsCustomModel bool // true when the request targets a user-configured (BYOK) model
}
1013

1114
// IsCustom returns true if the user has configured custom LLM provider settings.

internal/models/user.go

Lines changed: 22 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2,13 +2,29 @@ package models
22

33
import "go.mongodb.org/mongo-driver/v2/bson"
44

5+
type CustomModel struct {
6+
Id bson.ObjectID `bson:"_id"`
7+
Slug string `bson:"slug"`
8+
Name string `bson:"name"`
9+
BaseUrl string `bson:"base_url"`
10+
APIKey string `bson:"api_key"`
11+
ContextWindow int32 `bson:"context_window"`
12+
MaxOutput int32 `bson:"max_output"`
13+
InputPrice int32 `bson:"input_price"`
14+
OutputPrice int32 `bson:"output_price"`
15+
Temperature float32 `bson:"temperature"`
16+
ParallelToolCalls bool `bson:"parallel_tool_calls"`
17+
Store bool `bson:"store"`
18+
}
19+
520
type Settings struct {
6-
ShowShortcutsAfterSelection bool `bson:"show_shortcuts_after_selection"`
7-
FullWidthPaperDebuggerButton bool `bson:"full_width_paper_debugger_button"`
8-
EnableCitationSuggestion bool `bson:"enable_citation_suggestion"`
9-
FullDocumentRag bool `bson:"full_document_rag"`
10-
ShowedOnboarding bool `bson:"showed_onboarding"`
11-
OpenAIAPIKey string `bson:"openai_api_key"`
21+
ShowShortcutsAfterSelection bool `bson:"show_shortcuts_after_selection"`
22+
FullWidthPaperDebuggerButton bool `bson:"full_width_paper_debugger_button"`
23+
EnableCitationSuggestion bool `bson:"enable_citation_suggestion"`
24+
FullDocumentRag bool `bson:"full_document_rag"`
25+
ShowedOnboarding bool `bson:"showed_onboarding"`
26+
OpenAIAPIKey string `bson:"openai_api_key"`
27+
CustomModels []CustomModel `bson:"custom_models"`
1228
}
1329

1430
type User struct {

internal/services/toolkit/client/client_v2.go

Lines changed: 12 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -32,18 +32,20 @@ func (a *AIClientV2) GetOpenAIClient(llmConfig *models.LLMProviderConfig) *opena
3232
var Endpoint string = llmConfig.Endpoint
3333
var APIKey string = llmConfig.APIKey
3434

35-
if Endpoint == "" {
36-
if APIKey != "" {
37-
// User provided their own API key, use the OpenAI-compatible endpoint
38-
Endpoint = a.cfg.OpenAIBaseURL // standard openai base url
39-
} else {
40-
// suffix needed for cloudflare gateway
41-
Endpoint = a.cfg.InferenceBaseURL + "/openrouter"
35+
if !llmConfig.IsCustomModel {
36+
if Endpoint == "" {
37+
if APIKey != "" {
38+
// User provided their own API key, use the OpenAI-compatible endpoint
39+
Endpoint = a.cfg.OpenAIBaseURL // standard openai base url
40+
} else {
41+
// suffix needed for cloudflare gateway
42+
Endpoint = a.cfg.InferenceBaseURL + "/openrouter"
43+
}
4244
}
43-
}
4445

45-
if APIKey == "" {
46-
APIKey = a.cfg.InferenceAPIKey
46+
if APIKey == "" {
47+
APIKey = a.cfg.InferenceAPIKey
48+
}
4749
}
4850

4951
opts := []option.RequestOption{

internal/services/toolkit/client/completion_v2.go

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -25,8 +25,8 @@ import (
2525
// 1. The full chat history sent to the language model (including any tool call results).
2626
// 2. The incremental chat history visible to the user (including tool call results and assistant responses).
2727
// 3. An error, if any occurred during the process.
28-
func (a *AIClientV2) ChatCompletionV2(ctx context.Context, modelSlug string, messages OpenAIChatHistory, llmProvider *models.LLMProviderConfig) (OpenAIChatHistory, AppChatHistory, error) {
29-
openaiChatHistory, inappChatHistory, err := a.ChatCompletionStreamV2(ctx, nil, "", modelSlug, messages, llmProvider)
28+
func (a *AIClientV2) ChatCompletionV2(ctx context.Context, modelSlug string, messages OpenAIChatHistory, llmProvider *models.LLMProviderConfig, customModel *models.CustomModel) (OpenAIChatHistory, AppChatHistory, error) {
29+
openaiChatHistory, inappChatHistory, err := a.ChatCompletionStreamV2(ctx, nil, "", modelSlug, messages, llmProvider, customModel)
3030
if err != nil {
3131
return nil, nil, err
3232
}
@@ -54,7 +54,7 @@ func (a *AIClientV2) ChatCompletionV2(ctx context.Context, modelSlug string, mes
5454
// - If tool calls are required, it handles them and appends the results to the chat history, then continues the loop.
5555
// - If no tool calls are needed, it appends the assistant's response and exits the loop.
5656
// - Finally, it returns the updated chat histories and any error encountered.
57-
func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream chatv2.ChatService_CreateConversationMessageStreamServer, conversationId string, modelSlug string, messages OpenAIChatHistory, llmProvider *models.LLMProviderConfig) (OpenAIChatHistory, AppChatHistory, error) {
57+
func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream chatv2.ChatService_CreateConversationMessageStreamServer, conversationId string, modelSlug string, messages OpenAIChatHistory, llmProvider *models.LLMProviderConfig, customModel *models.CustomModel) (OpenAIChatHistory, AppChatHistory, error) {
5858
openaiChatHistory := messages
5959
inappChatHistory := AppChatHistory{}
6060

@@ -66,7 +66,7 @@ func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream
6666
}()
6767

6868
oaiClient := a.GetOpenAIClient(llmProvider)
69-
params := getDefaultParamsV2(modelSlug, a.toolCallHandler.Registry)
69+
params := getDefaultParamsV2(modelSlug, a.toolCallHandler.Registry, customModel)
7070

7171
for {
7272
params.Messages = openaiChatHistory

internal/services/toolkit/client/get_citation_keys.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -244,7 +244,7 @@ func (a *AIClientV2) GetCitationKeys(ctx context.Context, sentence string, userI
244244
_, resp, err := a.ChatCompletionV2(ctx, "gpt-5.2", OpenAIChatHistory{
245245
openai.SystemMessage("You are a helpful assistant that suggests relevant citation keys."),
246246
openai.UserMessage(message),
247-
}, llmProvider)
247+
}, llmProvider, nil)
248248

249249
if err != nil {
250250
return nil, err

internal/services/toolkit/client/get_conversation_title_v2.go

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ import (
1313
"github.com/samber/lo"
1414
)
1515

16-
func (a *AIClientV2) GetConversationTitleV2(ctx context.Context, inappChatHistory []*chatv2.Message, llmProvider *models.LLMProviderConfig) (string, error) {
16+
func (a *AIClientV2) GetConversationTitleV2(ctx context.Context, inappChatHistory []*chatv2.Message, llmProvider *models.LLMProviderConfig, modelSlug string, customModel *models.CustomModel) (string, error) {
1717
messages := lo.Map(inappChatHistory, func(message *chatv2.Message, _ int) string {
1818
if _, ok := message.Payload.MessageType.(*chatv2.MessagePayload_Assistant); ok {
1919
return fmt.Sprintf("Assistant: %s", message.Payload.GetAssistant().GetContent())
@@ -29,10 +29,16 @@ func (a *AIClientV2) GetConversationTitleV2(ctx context.Context, inappChatHistor
2929
message := strings.Join(messages, "\n")
3030
message = fmt.Sprintf("%s\nBased on above conversation, generate a short, clear, and descriptive title that summarizes the main topic or purpose of the discussion. The title should be concise, specific, and use natural language. Avoid vague or generic titles. Use abbreviation and short words if possible. Use 3-5 words if possible. Give me the title only, no other text including any other words.", message)
3131

32-
_, resp, err := a.ChatCompletionV2(ctx, "gpt-5-nano", OpenAIChatHistory{
32+
// Default model if user is not using their own
33+
modelToUse := "gpt-5-nano"
34+
if llmProvider.IsCustomModel {
35+
modelToUse = modelSlug
36+
}
37+
38+
_, resp, err := a.ChatCompletionV2(ctx, modelToUse, OpenAIChatHistory{
3339
openai.SystemMessage("You are a helpful assistant that generates a title for a conversation."),
3440
openai.UserMessage(message),
35-
}, llmProvider)
41+
}, llmProvider, customModel)
3642
if err != nil {
3743
return "", err
3844
}

0 commit comments

Comments
 (0)