feat(lark): Add /ask command using OpenAI #251
Open
wuhuizuo wants to merge 5 commits into main from feature/lark-with-ai
Changes shown are from 3 of the 5 commits.

Commits:
5ef0ec7 feat(lark): Add /ask command using OpenAI (wuhuizuo)
6a6844a feat(ask): Add MCP tool listing (wuhuizuo)
209760c feat(chatops-lark): add LLM and MCP tool integration for /ask command (wuhuizuo)
dfc2881 Update chatops-lark/pkg/events/handler/ask.go (wuhuizuo)
54f879a chore(chatops-lark): add debug logs for MCP and LLM client initializa… (wuhuizuo)
chatops-lark/pkg/events/handler/ask.go
@@ -0,0 +1,194 @@
package handler

import (
	"context"
	"errors"
	"fmt"
	"strings"

	mcpclient "github.com/mark3labs/mcp-go/client"
	"github.com/openai/openai-go"
	"github.com/openai/openai-go/azure"
	"github.com/rs/zerolog/log"
)

const (
	// Configuration keys read from the bot config.
	cfgKeyAskLlmCfg          = "ask.llm.azure_config"
	cfgKeyAskLlmModel        = "ask.llm.model"
	cfgKeyAskLlmSystemPrompt = "ask.llm.system_prompt"
	cfgKeyAskLlmMcpServers   = "ask.llm.mcp_servers"

	// Context keys under which setupAskCtx stores the initialized clients.
	ctxKeyLlmClient       = "llm.client"
	ctxKeyLlmModel        = "llm.model"
	ctxKeyLlmSystemPrompt = "llm.system_prompt"
	ctxKeyLLmTools        = "llm.tools"
	ctxKeyMcpClients      = "llm.mcp_clients"
)
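For reference, here is a sketch of the configuration shape these keys imply, written as the Go map that setupAskCtx (below) consumes. All values are illustrative placeholders, not settings from this PR:

// Hypothetical example of the config map passed to setupAskCtx.
// The nested field names mirror the lookups in the type switches below.
var exampleAskConfig = map[string]any{
	cfgKeyAskLlmCfg: map[string]any{
		"api_key":     "<azure-openai-api-key>",           // placeholder
		"base_url":    "https://example.openai.azure.com", // placeholder endpoint
		"api_version": "2024-06-01",                       // placeholder API version
	},
	cfgKeyAskLlmModel:        "gpt-4o",
	cfgKeyAskLlmSystemPrompt: "You are a helpful assistant for the infra team.",
	cfgKeyAskLlmMcpServers: map[string]any{
		"internal-tools": map[string]any{
			"base_url": "https://mcp.example.internal/sse", // placeholder MCP SSE endpoint
		},
	},
}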

const (
	askHelpText = `missing question

Usage: /ask <question...>

Example:
  /ask What is the on-call schedule for the infra team this week?
  /ask How do I debug error code 1234 in service X?

For more details, use: /ask --help
`

	askDetailedHelpText = `Usage: /ask <question...>

Description:
  Asks an AI assistant a question. The assistant may leverage internal tools (MCP context)
  to provide relevant and up-to-date information alongside its general knowledge.

Examples:
  /ask What is the current status of the main production cluster?
  /ask Explain the purpose of the 'widget-processor' microservice.
  /ask Summarize the recent alerts for the database tier.
  /ask How do I request access to the staging environment?

Use '/ask --help' or '/ask -h' to see this message.
`
)

// runCommandAsk handles the /ask command logic.
func runCommandAsk(ctx context.Context, args []string) (string, error) {
	if len(args) == 0 {
		// No question provided; the help text doubles as the error message.
		return "", errors.New(askHelpText)
	}

	// Check for help flags explicitly, as there are no subcommands.
	firstArg := args[0]
	if firstArg == "-h" || firstArg == "--help" {
		if len(args) == 1 {
			return askDetailedHelpText, nil
		}
		// Allow asking help *about* something, e.g. /ask --help tools.
		// For now, treat any args after --help as part of the help request
		// itself and return the detailed help text.
		// Alternatively, this could error out:
		// return "", fmt.Errorf("unknown arguments after %s: %v", firstArg, args[1:])
		return askDetailedHelpText, nil
	}

	// The entire argument list forms the question.
	question := strings.Join(args, " ")

	// AI/tool interaction. processAskRequest will:
	//  1. Parse the question for intent or specific tool requests (if applicable).
	//  2. Potentially query MCP tools based on the question to gather context.
	//  3. Format a prompt including the user's question and any gathered context.
	//  4. Send the prompt to the configured LLM.
	//  5. Receive the LLM's response.
	//  6. Format the response for Lark.
	result, err := processAskRequest(ctx, question)
	if err != nil {
		return "", fmt.Errorf("failed to process ask request: %w", err)
	}

	return result, nil
}
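To make the control flow concrete, here is a hypothetical call site. The real dispatcher that routes /ask to runCommandAsk lives elsewhere in chatops-lark, and loadedConfig is an assumed stand-in for the bot's parsed configuration:

// Hypothetical wiring, not part of this diff.
func exampleAsk(loadedConfig map[string]any) {
	ctx := setupAskCtx(context.Background(), loadedConfig, nil)
	reply, err := runCommandAsk(ctx, []string{"what", "is", "the", "on-call", "schedule?"})
	if err != nil {
		fmt.Println(err) // the error text is user-facing help
		return
	}
	fmt.Println(reply) // formatted answer relayed back to Lark
}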

// processAskRequest interacts with the LLM and tools.
func processAskRequest(ctx context.Context, question string) (string, error) {
	// These values are placed on the context by setupAskCtx; the type
	// assertions will panic if the context was not prepared there.
	client := ctx.Value(ctxKeyLlmClient).(*openai.Client)
	// openaiModel := ctx.Value(ctxKeyLlmModel).(shared.ChatModel)
	systemPrompt := ctx.Value(ctxKeyLlmSystemPrompt).(string)
	tools := ctx.Value(ctxKeyLLmTools).([]openai.ChatCompletionToolParam)

	llmParams := openai.ChatCompletionNewParams{
		Messages: []openai.ChatCompletionMessageParamUnion{
			openai.SystemMessage(systemPrompt),
			openai.UserMessage(question),
		},
		Tools: tools,
		Model: openai.ChatModelGPT4o,
		Seed:  openai.Int(1),
	}

	clients := ctx.Value(ctxKeyMcpClients).([]mcpclient.MCPClient)
	mcpToolMap := getFunctionMCPClientMap(ctx, clients)

	for {
		completion, err := client.Chat.Completions.New(ctx, llmParams)
		if err != nil {
			log.Err(err).Msg("failed to create chat completion")
			return "", fmt.Errorf("failed to create chat completion: %w", err)
		}

		toolCalls := completion.Choices[0].Message.ToolCalls
		if len(toolCalls) == 0 {
			return completion.Choices[0].Message.Content, nil
		}

		// If there was a function call, continue the conversation: echo the
		// assistant message, then answer each tool call with its result.
		llmParams.Messages = append(llmParams.Messages, completion.Choices[0].Message.ToParam())
		for _, toolCall := range toolCalls {
			if mcpClient, ok := mcpToolMap[toolCall.Function.Name]; ok {
				toolResData, err := processMcpToolCall(ctx, mcpClient, toolCall)
				if err != nil {
					log.Err(err).Msg("failed to process tool call")
					return "", fmt.Errorf("failed to process tool call: %w", err)
				}

				llmParams.Messages = append(llmParams.Messages, openai.ToolMessage(toolResData, toolCall.ID))
				log.Debug().Any("message", llmParams.Messages[len(llmParams.Messages)-1]).Msg("message")
			} else {
				// No MCP client serves this tool; still answer the tool call
				// so the next completion request is not rejected for a
				// missing tool response.
				llmParams.Messages = append(llmParams.Messages,
					openai.ToolMessage(fmt.Sprintf("unknown tool: %s", toolCall.Function.Name), toolCall.ID))
			}
		}
	}
}
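getFunctionMCPClientMap and processMcpToolCall are defined in another file of this PR that is not shown in this view. As a reading aid, here is a minimal sketch of the tool-call bridge, assuming the mark3labs/mcp-go request/response shapes; the real helper may differ (assumed extra imports: "encoding/json" and mcp "github.com/mark3labs/mcp-go/mcp"):

// Hypothetical sketch of forwarding one OpenAI tool call to an MCP client.
func processMcpToolCallSketch(ctx context.Context, c mcpclient.MCPClient, call openai.ChatCompletionMessageToolCall) (string, error) {
	// The LLM returns tool arguments as a JSON string.
	var args map[string]any
	if err := json.Unmarshal([]byte(call.Function.Arguments), &args); err != nil {
		return "", fmt.Errorf("bad tool arguments: %w", err)
	}

	req := mcp.CallToolRequest{}
	req.Params.Name = call.Function.Name
	req.Params.Arguments = args

	res, err := c.CallTool(ctx, req)
	if err != nil {
		return "", err
	}

	// Concatenate any text content blocks into a single string for the LLM.
	var sb strings.Builder
	for _, content := range res.Content {
		if text, ok := content.(mcp.TextContent); ok {
			sb.WriteString(text.Text)
		}
	}
	return sb.String(), nil
}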

func setupAskCtx(ctx context.Context, config map[string]any, _ *CommandSender) context.Context {
	log.Info().Msg("setting up /ask command context")
	// Initialize LLM client.
	var client openai.Client
	{
		llmCfg := config[cfgKeyAskLlmCfg]
		switch v := llmCfg.(type) {
		case map[string]any:
			apiKey := v["api_key"].(string)
			endpointURL := v["base_url"].(string)
			apiVersion := v["api_version"].(string)
			client = openai.NewClient(
				azure.WithAPIKey(apiKey),
				azure.WithEndpoint(endpointURL, apiVersion),
			)
		default:
			// No Azure config: fall back to the default client, which reads
			// credentials from the environment.
			client = openai.NewClient()
		}
	}

	// Initialize LLM tools.
	var mcpClients []mcpclient.MCPClient
	var toolDeclarations []openai.ChatCompletionToolParam
	{
		mcpCfg := config[cfgKeyAskLlmMcpServers]
		log.Info().Any("mcpCfg", mcpCfg).Msg("start to initialize MCP clients")
		switch v := mcpCfg.(type) {
		case map[string]any:
			for name, cfg := range v {
				url := cfg.(map[string]any)["base_url"].(string)
				client, declarations, err := initializeMCPClient(ctx, name, url)
				if err != nil {
					log.Err(err).Str("name", name).Str("url", url).Msg("failed to initialize MCP SSE client")
					continue
				}
				mcpClients = append(mcpClients, client)
				toolDeclarations = append(toolDeclarations, declarations...)
			}
		}
	}

	// Set up the context consumed by processAskRequest.
	newCtx := context.WithValue(ctx, ctxKeyLlmClient, &client)
	newCtx = context.WithValue(newCtx, ctxKeyLlmModel, config[cfgKeyAskLlmModel])
	newCtx = context.WithValue(newCtx, ctxKeyLlmSystemPrompt, config[cfgKeyAskLlmSystemPrompt])
	newCtx = context.WithValue(newCtx, ctxKeyLLmTools, toolDeclarations)
	newCtx = context.WithValue(newCtx, ctxKeyMcpClients, mcpClients)

	return newCtx
}
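initializeMCPClient is introduced alongside this handler but not shown in this view. Below is a rough sketch of SSE client setup plus tool listing with mcp-go; the helper name, the openai-go field shapes, and the schema conversion are assumptions rather than the PR's exact code (assumed extra import: mcp "github.com/mark3labs/mcp-go/mcp"):

// Hypothetical sketch: connect to an MCP server over SSE and turn its
// tools into OpenAI tool declarations.
func initializeMCPClientSketch(ctx context.Context, name, baseURL string) (mcpclient.MCPClient, []openai.ChatCompletionToolParam, error) {
	c, err := mcpclient.NewSSEMCPClient(baseURL)
	if err != nil {
		return nil, nil, err
	}
	if err := c.Start(ctx); err != nil {
		return nil, nil, err
	}
	if _, err := c.Initialize(ctx, mcp.InitializeRequest{}); err != nil {
		return nil, nil, err
	}

	listed, err := c.ListTools(ctx, mcp.ListToolsRequest{})
	if err != nil {
		return nil, nil, err
	}

	var decls []openai.ChatCompletionToolParam
	for _, t := range listed.Tools {
		decls = append(decls, openai.ChatCompletionToolParam{
			Function: openai.FunctionDefinitionParam{
				Name:        t.Name,
				Description: openai.String(t.Description),
				// Assumption: the MCP input schema can be passed through as
				// the JSON-schema map the OpenAI API expects; shown here
				// with a minimal placeholder schema instead.
				Parameters: openai.FunctionParameters{"type": "object"},
			},
		})
	}
	return c, decls, nil
}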
Review comment:
Consider adding more context to the error message, such as the question that was asked. This can help with debugging when the ask request fails.
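A hypothetical version of that suggestion, applied to the return in runCommandAsk:

// Sketch only; wording of the message is an assumption.
return "", fmt.Errorf("failed to process ask request for question %q: %w", question, err)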