11// src/core/llm/generateIssueContent.ts
22import { TodoItem } from '../../parser/types' ;
3- import OpenAI from 'openai' ;
43import * as core from '@actions/core' ;
4+ import { chatCompletionWithRetry } from './llmClient' ;
55
6- const openai = new OpenAI ( {
7- apiKey : core . getInput ( 'openai-api-key' ) , // correto agora
8- } ) ;
9-
10- const model = core . getInput ( 'openai-model' ) || 'gpt-3.5-turbo' ;
6+ const provider = core . getInput ( 'llm-provider' ) || 'openai' ;
7+ const model =
8+ provider === 'gemini'
9+ ? core . getInput ( 'gemini-model' ) || 'gemini-1.5-pro'
10+ : core . getInput ( 'openai-model' ) || 'gpt-3.5-turbo' ;
1111
// Generates a GitHub issue title and body for one TodoItem by sending a prompt
// to the configured LLM (module-level `provider`/`model` select the backend).
// NOTE(review): this span is a diff render — `-` lines are the removed
// OpenAI-only path, `+` lines the new provider-agnostic path, and the `@@`
// hunk headers hide part of the prompt template and the match-error handling.
1212export async function generateIssueTitleAndBodyLLM ( todo : TodoItem ) : Promise < { title : string ; body : string } > {
1313 const prompt = `
@@ -27,17 +27,20 @@ TITLE: <title>
2727BODY:
2828<detailed body>
2929` ;
30- // 👇 Add here
31- core . debug ( `[DEBUG] OpenAI key starts with: ${ process . env . OPENAI_API_KEY ?. slice ( 0 , 5 ) } ` ) ;
// Debug logging masks secrets: only the first 5 characters of the key are shown.
30+ core . debug ( `[DEBUG] LLM provider: ${ provider } ` ) ;
31+ if ( provider === 'openai' ) {
32+ core . debug ( `[DEBUG] OpenAI key starts with: ${ process . env . OPENAI_API_KEY ?. slice ( 0 , 5 ) } ` ) ;
33+ } else {
34+ core . debug ( `[DEBUG] Gemini key starts with: ${ process . env . GEMINI_API_KEY ?. slice ( 0 , 5 ) } ` ) ;
35+ }
3236 core . debug ( `[DEBUG] Using model: ${ model } ` ) ;
33- core . debug ( '[DEBUG] Sending prompt to OpenAI ...' ) ;
37+ core . debug ( '[DEBUG] Sending prompt to LLM ...' ) ;
3438try {
// The retry wrapper replaces the direct `openai.chat.completions.create` call
// and resolves the removed TODO (old line 40) about retrying API errors.
35- const response = await openai . chat . completions . create ( {
39+ const response = await chatCompletionWithRetry ( {
3640 model,
3741 messages : [ { role : 'user' , content : prompt } ] ,
3842 temperature : 0.4 ,
3943 } ) ;
40- // TODO(priority=high): improve retry logic for API errors
4144 const result = response . choices [ 0 ] . message ?. content || '' ;
// NOTE(review): regex below is garbled by text extraction; the intended
// pattern is /TITLE:\s*(.+?)\s*BODY:\s*([\s\S]*)/i — it splits the LLM reply
// into a TITLE line and a multi-line BODY.
4245 const match = result . match ( / T I T L E : \s * ( .+ ?) \s * B O D Y : \s * ( [ \s \S ] * ) / i)
4346
// NOTE(review): the hunk hides old lines 44-47 here — presumably the
// `!match` error path; confirm against the full file before relying on this.
4851 const [ , title , body ] = match ;
4952 return { title : title . trim ( ) , body : body . trim ( ) } ;
// Rethrown after logging so the caller still sees the failure.
// NOTE(review): `catch (err: any)` — prefer `catch (err: unknown)` under strict mode.
5053} catch ( err : any ) {
51- console . error ( '[ERROR] OpenAI call failed:' , err ) ;
54+ console . error ( '[ERROR] LLM call failed:' , err ) ;
5255 throw err ;
5356}
5457}
0 commit comments