-
Notifications
You must be signed in to change notification settings - Fork 1
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
feat(lark): Add /ask command using OpenAI #251
base: main
Are you sure you want to change the base?
Changes from 1 commit
5ef0ec7
6a6844a
209760c
dfc2881
54f879a
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,134 @@ | ||
package handler | ||
|
||
import (
	"context"
	"errors"
	"fmt"
	"strings"

	"github.com/rs/zerolog/log"
	"github.com/sashabaranov/go-openai"
)
|
||
const ( | ||
askHelpText = `missing question | ||
|
||
Usage: /ask <question...> | ||
|
||
Example: | ||
/ask What is the on-call schedule for the infra team this week? | ||
/ask How do I debug error code 1234 in service X? | ||
|
||
For more details, use: /ask --help | ||
` | ||
|
||
askDetailedHelpText = `Usage: /ask <question...> | ||
|
||
Description: | ||
Asks an AI assistant a question. The assistant may leverage internal tools (MCP context) | ||
to provide relevant and up-to-date information alongside its general knowledge. | ||
|
||
Examples: | ||
/ask What is the current status of the main production cluster? | ||
/ask Explain the purpose of the 'widget-processor' microservice. | ||
/ask Summarize the recent alerts for the database tier. | ||
/ask How do I request access to the staging environment? | ||
|
||
Use '/ask --help' or '/ask -h' to see this message. | ||
` | ||
) | ||
|
||
// runCommandAsk handles the /ask command logic. | ||
func runCommandAsk(ctx context.Context, args []string) (string, error) { | ||
if len(args) == 0 { | ||
// No question provided | ||
return "", fmt.Errorf(askHelpText) | ||
} | ||
|
||
// Check for help flags explicitly, as there are no subcommands | ||
firstArg := args[0] | ||
if firstArg == "-h" || firstArg == "--help" { | ||
if len(args) == 1 { | ||
return askDetailedHelpText, nil | ||
} | ||
// Allow asking help *about* something, e.g. /ask --help tools | ||
// But for now, just treat any args after --help as part of the help request itself. | ||
// Let's just return the detailed help for simplicity. | ||
// Alternatively, could error out: | ||
// return "", fmt.Errorf("unknown arguments after %s: %v", firstArg, args[1:]) | ||
return askDetailedHelpText, nil | ||
} | ||
|
||
// The entire argument list forms the question | ||
question := strings.Join(args, " ") | ||
|
||
// --- Placeholder for actual AI/Tool interaction --- | ||
// Here you would: | ||
// 1. Parse the question for intent or specific tool requests (if applicable). | ||
// 2. Potentially query MCP tools based on the question to gather context. | ||
// 3. Format a prompt including the user's question and any gathered context. | ||
// 4. Send the prompt to the configured LLM. | ||
// 5. Receive the LLM's response. | ||
// 6. Format the response for Lark. | ||
result, err := processAskRequest(ctx, question) | ||
if err != nil { | ||
return "", fmt.Errorf("failed to process ask request: %w", err) | ||
} | ||
// --- End Placeholder --- | ||
wuhuizuo marked this conversation as resolved.
Show resolved
Hide resolved
|
||
|
||
return result, nil | ||
} | ||
|
||
// processAskRequest is a placeholder for the core logic interacting with the LLM and tools. | ||
// TODO: Implement the actual interaction with the AI model and MCP tools. | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This is an important // TODO: Implement the actual interaction with the AI model and MCP tools.
// This implementation should include parsing the question, querying MCP tools, formatting a prompt, sending the prompt to the LLM, receiving the response, and formatting the response for Lark. |
||
func processAskRequest(ctx context.Context, question string) (string, error) { | ||
// Simulate processing and generating a response | ||
// In a real implementation, this would involve API calls to an LLM service | ||
// and potentially calls to internal "MCP tools" APIs. | ||
fmt.Printf("Processing ask request for question: %q\n", question) // Log for debugging | ||
|
||
openaiCfg := ctx.Value(ctxKeyOpenAIConfig) | ||
openaiModel := ctx.Value(ctxKeyOpenAIModel) | ||
systemPrompt := ctx.Value(ctxKeyOpenAISystemPrompt) | ||
log.Debug(). | ||
Any("base_url", openaiCfg.(openai.ClientConfig).BaseURL). | ||
Any("systemPrompt", systemPrompt). | ||
Msg("debug vars") | ||
client := openai.NewClientWithConfig(openaiCfg.(openai.ClientConfig)) | ||
|
||
resp, err := client.CreateChatCompletion( | ||
context.Background(), | ||
openai.ChatCompletionRequest{ | ||
Model: openaiModel.(string), | ||
Messages: []openai.ChatCompletionMessage{ | ||
{ | ||
Role: openai.ChatMessageRoleSystem, | ||
Content: systemPrompt.(string), | ||
}, | ||
{ | ||
Role: openai.ChatMessageRoleUser, | ||
Content: question, | ||
}, | ||
}, | ||
}, | ||
) | ||
|
||
if err != nil { | ||
log.Err(err).Msg("failed to create chat completion") | ||
return "", fmt.Errorf("failed to create chat completion: %w", err) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. |
||
} | ||
response := resp.Choices[0].Message.Content | ||
|
||
// // Example of how context *could* be added (replace with actual tool calls) | ||
// if strings.Contains(strings.ToLower(question), "status") && strings.Contains(strings.ToLower(question), "production") { | ||
// // Pretend we called an MCP tool for cluster status | ||
// mcpContext := "\n[MCP Tool Context: Production cluster status is currently nominal.]" | ||
// response += mcpContext | ||
// } | ||
|
||
// // Simulate potential error | ||
// if strings.Contains(strings.ToLower(question), "error simulation") { | ||
// return "", fmt.Errorf("simulated error during AI processing") | ||
// } | ||
|
||
return response, nil | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -16,11 +16,15 @@ import ( | |
larkim "github.com/larksuite/oapi-sdk-go/v3/service/im/v1" | ||
"github.com/rs/zerolog" | ||
"github.com/rs/zerolog/log" | ||
"github.com/sashabaranov/go-openai" | ||
) | ||
|
||
const ( | ||
ctxKeyGithubToken = "github_token" | ||
ctxKeyLarkSenderEmail = "lark.sender.email" | ||
ctxKeyGithubToken = "github_token" | ||
ctxKeyLarkSenderEmail = "lark.sender.email" | ||
ctxKeyOpenAIConfig = "openai.config" | ||
ctxKeyOpenAIModel = "openai.model" | ||
ctxKeyOpenAISystemPrompt = "openai.system_prompt" | ||
|
||
// Message types | ||
msgTypePrivate = "private" | ||
|
@@ -52,6 +56,28 @@ var commandConfigs = map[string]CommandConfig{ | |
return context.WithValue(ctx, ctxKeyLarkSenderEmail, sender.Email) | ||
}, | ||
}, | ||
"/ask": { | ||
Handler: runCommandAsk, | ||
SetupContext: func(ctx context.Context, config map[string]any, sender *CommandSender) context.Context { | ||
cfg := config["openai.config"] | ||
var openaiCfg openai.ClientConfig | ||
switch v := cfg.(type) { | ||
case map[string]any: | ||
openaiCfg = openai.DefaultConfig(v["api_key"].(string)) | ||
openaiCfg.BaseURL = v["base_url"].(string) | ||
openaiCfg.APIType = openai.APIType(v["api_type"].(string)) | ||
openaiCfg.APIVersion = v["api_version"].(string) | ||
openaiCfg.Engine = v["engine"].(string) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. It would be beneficial to add error checking after each type assertion to ensure the values are of the expected type. This can prevent unexpected panics if the configuration is not set up correctly. if apiKey, ok := v["api_key"].(string); ok {
openaiCfg = openai.DefaultConfig(apiKey)
} else {
log.Error().Msg("api_key not found or not a string")
return ctx // or return an error, depending on desired behavior
}
if baseURL, ok := v["base_url"].(string); ok {
openaiCfg.BaseURL = baseURL
} else {
log.Error().Msg("base_url not found or not a string")
}
if apiType, ok := v["api_type"].(string); ok {
openaiCfg.APIType = openai.APIType(apiType)
} else {
log.Error().Msg("api_type not found or not a string")
}
if apiVersion, ok := v["api_version"].(string); ok {
openaiCfg.APIVersion = apiVersion
} else {
log.Error().Msg("api_version not found or not a string")
}
if engine, ok := v["engine"].(string); ok {
openaiCfg.Engine = engine
} else {
log.Error().Msg("engine not found or not a string")
} |
||
default: | ||
openaiCfg = openai.DefaultConfig("") | ||
} | ||
|
||
newCtx := context.WithValue(ctx, ctxKeyOpenAIConfig, openaiCfg) | ||
newCtx = context.WithValue(newCtx, ctxKeyOpenAIModel, config["openai.model"]) | ||
newCtx = context.WithValue(newCtx, ctxKeyOpenAISystemPrompt, config["openai.system_prompt"]) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Consider adding error checking to ensure that model, ok := config["openai.model"].(string)
if !ok {
log.Error().Msg("openai.model not found or not a string")
return ctx // or return an error
}
systemPrompt, ok := config["openai.system_prompt"].(string)
if !ok {
log.Error().Msg("openai.system_prompt not found or not a string")
return ctx // or return an error
}
newCtx := context.WithValue(ctx, ctxKeyOpenAIConfig, openaiCfg)
newCtx = context.WithValue(newCtx, ctxKeyOpenAIModel, model)
newCtx = context.WithValue(newCtx, ctxKeyOpenAISystemPrompt, systemPrompt)
return newCtx |
||
return newCtx | ||
}, | ||
}, | ||
} | ||
|
||
type Command struct { | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Consider adding more context to the error message, such as the question that was asked. This can help with debugging when the ask request fails.