Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .github/workflows/run_tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,8 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Set up Node.js
uses: actions/setup-node@v4
Expand All @@ -24,6 +26,9 @@ jobs:

- name: Run tests with coverage
run: yarn vitest run --coverage

- name: Check package version matches tag
run: yarn check-version


# - name: Upload coverage to Codecov
Expand Down
17 changes: 16 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ A GitHub Action that scans your codebase for inline TODOs, FIXMEs, and BUG comme
## 🚀 Features

- ✅ Detects `TODO`, `FIXME`, `BUG`, and `HACK` comments
- ✅ Supports multiple languages: `.ts`, `.js`, `.py`, `.go`, `.html`, etc.
- ✅ Supports many languages: `.ts`, `.js`, `.py`, `.go`, `.c`, `.cpp`, `.rs`, `.html`, `.yaml`, etc.
- ✅ Skips common directories like `node_modules`, `dist`, and `coverage`
- ✅ Extracts metadata like `priority`, `due`, etc.
- ✅ Parses structured tags (`@assignee`, `#module`, `key=value`)
Expand All @@ -17,6 +17,8 @@ A GitHub Action that scans your codebase for inline TODOs, FIXMEs, and BUG comme
- ✅ Supports custom label colors and descriptions via JSON config
- ✅ Custom templates for issue titles and bodies
- ✅ LLM-powered issue title and body generation
- ✅ Automatic retry logic for OpenAI API calls
- ✅ Supports multiple LLM providers: OpenAI or Gemini
- ✅ Command-line interface for local usage
- ✅ Optional Jira synchronization

Expand Down Expand Up @@ -55,8 +57,12 @@ jobs:
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
limit: 5
llm: true
llm-provider: openai # or 'gemini'
```

Set `OPENAI_API_KEY` or `GEMINI_API_KEY` secrets based on your chosen provider.

### 2. Run the CLI locally

Use the bundled command-line interface to scan a directory on your machine and
Expand Down Expand Up @@ -159,3 +165,12 @@ smart-todo-action/
├── tsconfig.json
└── README.md
```

## 🔖 Versioning

The `check-version` script ensures the `package.json` version matches the
current Git tag. It runs in CI and can be invoked locally with:

```bash
yarn check-version
```
14 changes: 14 additions & 0 deletions action.yml
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,11 @@ inputs:
description: Use LLM to generate issue titles and bodies
default: 'false'

llm-provider:
required: false
description: LLM provider to use (`openai` or `gemini`)
default: openai

openai-api-key:
required: false
description: 'OpenAI API key used when `llm` is true'
Expand All @@ -48,6 +53,15 @@ inputs:
description: OpenAI model to use (e.g., `gpt-3.5-turbo`, `gpt-4`)
default: gpt-3.5-turbo

gemini-api-key:
required: false
description: Gemini API key used when `llm-provider` is `gemini`

gemini-model:
required: false
description: Gemini model to use (e.g., `gemini-1.5-pro`)
default: gemini-1.5-pro

sync-to-jira:
required: false
default: 'false'
Expand Down
6 changes: 4 additions & 2 deletions package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "smart-todo-action",
"version": "0.1.0",
"version": "1.0.0",
"description": "GitHub Action inteligente para transformar TODOs em issues e tarefas rastreáveis.",
"main": "dist/index.js",
"bin": {
Expand All @@ -11,7 +11,8 @@
"test": "vitest run",
"prepare": "yarn ncc build src/ActionMain.ts -o dist",
"changelog": "ts-node scripts/generateChangelog.ts",
"build:dist": "ncc build src/ActionMain.ts -o dist"
"build:dist": "ncc build src/ActionMain.ts -o dist",
"check-version": "ts-node scripts/checkVersionTag.ts"
},
"keywords": [
"github-action",
Expand All @@ -26,6 +27,7 @@
"dependencies": {
"@actions/core": "^1.10.0",
"@actions/github": "^6.0.1",
"@google/genai": "^1.4.0",
"@jest/globals": "^29.7.0",
"@octokit/rest": "^21.1.1",
"@types/jest": "^29.5.14",
Expand Down
32 changes: 32 additions & 0 deletions scripts/checkVersionTag.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
// scripts/checkVersionTag.ts
//
// CI guard: fails when the `version` field in package.json does not match
// the Git tag pointing at HEAD (tags may carry a leading "v", e.g. v1.2.3
// for version 1.2.3). When HEAD is not tagged the check is skipped so
// ordinary branch pushes pass.
import { readFileSync } from 'fs';
import { execSync } from 'child_process';

const pkg = JSON.parse(readFileSync('package.json', 'utf8'));

/**
 * Resolves the tag for the current commit.
 *
 * Prefers the GitHub Actions `GITHUB_REF` environment variable
 * (`refs/tags/<tag>` on tag builds); falls back to asking Git directly.
 *
 * @returns The tag name, or `null` when HEAD is not tagged.
 */
function getGitTag(): string | null {
  const ref = process.env.GITHUB_REF;
  if (ref?.startsWith('refs/tags/')) {
    return ref.slice('refs/tags/'.length);
  }
  try {
    // Suppress stderr: on an untagged HEAD, `git describe --exact-match`
    // prints "fatal: no tag exactly matches ..." — expected and handled
    // by the catch below, so it should not pollute the CI log.
    return execSync('git describe --tags --exact-match', {
      stdio: ['ignore', 'pipe', 'ignore'],
    })
      .toString()
      .trim();
  } catch {
    return null;
  }
}

const tag = getGitTag();
if (!tag) {
  console.log('No git tag found; skipping version check');
  process.exit(0);
}

// Accept both "1.2.3" and "v1.2.3" tag styles.
const normalizedTag = tag.startsWith('v') ? tag.slice(1) : tag;
if (pkg.version !== normalizedTag) {
  console.error(
    `Version mismatch: package.json is ${pkg.version} but tag is ${tag}`
  );
  process.exit(1);
}

console.log(`\u2714\ufe0f package.json version ${pkg.version} matches tag ${tag}`);
13 changes: 11 additions & 2 deletions src/ActionMain.ts
Original file line number Diff line number Diff line change
Expand Up @@ -23,11 +23,20 @@ async function run(): Promise<void> {
const workspace = process.env.GITHUB_WORKSPACE || '.';

// LLM support
process.env.OPENAI_API_KEY = core.getInput('openai-api-key') || process.env.OPENAI_API_KEY;
const llmProvider = core.getInput('llm-provider') || 'openai';
process.env.LLM_PROVIDER = llmProvider;
if (llmProvider === 'gemini') {
process.env.GEMINI_API_KEY = core.getInput('gemini-api-key') || process.env.GEMINI_API_KEY;
} else {
process.env.OPENAI_API_KEY = core.getInput('openai-api-key') || process.env.OPENAI_API_KEY;
}
const useLLM = core.getInput('llm') === 'true';
if (useLLM && !process.env.OPENAI_API_KEY) {
if (useLLM && llmProvider === 'openai' && !process.env.OPENAI_API_KEY) {
core.warning('⚠️ LLM is enabled, but OPENAI_API_KEY is not set.');
}
if (useLLM && llmProvider === 'gemini' && !process.env.GEMINI_API_KEY) {
core.warning('⚠️ LLM is enabled, but GEMINI_API_KEY is not set.');
}

const useStructured = core.getInput('structured') === 'true';

Expand Down
27 changes: 15 additions & 12 deletions src/core/llm/generateIssueContent.ts
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
// src/core/llm/generateIssueContent.ts
import { TodoItem } from '../../parser/types';
import OpenAI from 'openai';
import * as core from '@actions/core';
import { chatCompletionWithRetry } from './llmClient';

const openai = new OpenAI({
apiKey: core.getInput('openai-api-key'), // correto agora
});

const model = core.getInput('openai-model') || 'gpt-3.5-turbo';
const provider = core.getInput('llm-provider') || 'openai';
const model =
provider === 'gemini'
? core.getInput('gemini-model') || 'gemini-1.5-pro'
: core.getInput('openai-model') || 'gpt-3.5-turbo';

export async function generateIssueTitleAndBodyLLM(todo: TodoItem): Promise<{ title: string; body: string }> {
const prompt = `
Expand All @@ -27,17 +27,20 @@ TITLE: <title>
BODY:
<detailed body>
`;
// 👇 Adiciona aqui
core.debug(`[DEBUG] OpenAI key starts with: ${process.env.OPENAI_API_KEY?.slice(0, 5)}`);
core.debug(`[DEBUG] LLM provider: ${provider}`);
if (provider === 'openai') {
core.debug(`[DEBUG] OpenAI key starts with: ${process.env.OPENAI_API_KEY?.slice(0, 5)}`);
} else {
core.debug(`[DEBUG] Gemini key starts with: ${process.env.GEMINI_API_KEY?.slice(0, 5)}`);
}
core.debug(`[DEBUG] Using model: ${model}`);
core.debug('[DEBUG] Sending prompt to OpenAI...');
core.debug('[DEBUG] Sending prompt to LLM...');
try {
const response = await openai.chat.completions.create({
const response = await chatCompletionWithRetry({
model,
messages: [{ role: 'user', content: prompt }],
temperature: 0.4,
});
// TODO(priority=high): improve retry logic for API errors
const result = response.choices[0].message?.content || '';
const match = result.match(/TITLE:\s*(.+?)\s*BODY:\s*([\s\S]*)/i);

Expand All @@ -48,7 +51,7 @@ try {
const [, title, body] = match;
return { title: title.trim(), body: body.trim() };
} catch (err: any) {
console.error('[ERROR] OpenAI call failed:', err);
console.error('[ERROR] LLM call failed:', err);
throw err;
}
}
Expand Down
54 changes: 54 additions & 0 deletions src/core/llm/llmClient.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
// src/core/llm/llmClient.ts
import OpenAI from 'openai';
import { GoogleGenAI } from '@google/genai';
import * as core from '@actions/core';

// Which LLM backend to use; read once at module load from the action input.
// Falls back to OpenAI when `llm-provider` is unset.
const provider = core.getInput('llm-provider') || 'openai';

// Both clients are constructed eagerly at import time, even though only one
// is used per run. NOTE(review): the unused provider's key input is normally
// empty — confirm both SDKs tolerate an empty apiKey at construction.
export const openai = new OpenAI({
  apiKey: core.getInput('openai-api-key'),
});

export const gemini = new GoogleGenAI({
  apiKey: core.getInput('gemini-api-key'),
});

/**
 * Provider-agnostic chat-completion helper.
 *
 * `chatCompletionWithRetry` below forwards a request to OpenAI or Gemini
 * (selected by the `llm-provider` input) and retries failed calls with
 * exponential backoff before giving up.
 */
/** Provider-agnostic request shape, mirroring OpenAI's chat-completion input. */
export interface ChatCompletionParams {
  /** Model identifier, e.g. `gpt-3.5-turbo` or `gemini-1.5-pro`. */
  model: string;
  /** Conversation messages; for Gemini the contents are flattened into one prompt. */
  messages: { role: string; content: string }[];
  /** Optional sampling temperature forwarded to the provider. */
  temperature?: number;
}

/**
 * Sends a chat-completion request to the configured provider (`openai` or
 * `gemini`) and retries failures with exponential backoff.
 *
 * Regardless of provider, the result is normalized to the OpenAI response
 * shape (`choices[0].message.content`) so callers need no branching.
 *
 * @param params     Model, messages and optional temperature.
 * @param maxRetries Additional attempts after the first failure (default 3).
 * @returns          OpenAI-style envelope containing the generated text.
 * @throws           The last provider error once all retries are exhausted.
 */
export async function chatCompletionWithRetry(
  params: ChatCompletionParams,
  maxRetries = 3
): Promise<{ choices: { message: { content: string } }[] }> {
  let attempt = 0;
  for (;;) {
    try {
      if (provider === 'gemini') {
        // Gemini takes a single contents payload; flatten the chat messages.
        const prompt = params.messages.map(m => m.content).join('\n');
        const response = await gemini.models.generateContent({
          model: params.model,
          // @google/genai expects generation options under `config`;
          // `generationConfig` belongs to the legacy SDK and was silently
          // ignored here, dropping the temperature setting.
          config: { temperature: params.temperature },
          contents: prompt,
        } as any);
        // Normalize to the OpenAI envelope. `.text` can be undefined on
        // blocked/empty responses; coalesce so the declared `content: string`
        // contract holds.
        return { choices: [{ message: { content: (response as any).text ?? '' } }] };
      }
      return (await openai.chat.completions.create(params as any)) as any;
    } catch (err) {
      attempt++;
      if (attempt > maxRetries) throw err;
      // Exponential backoff capped at 5s: 2s, 4s, 5s, 5s, ...
      const delay = Math.min(1000 * 2 ** attempt, 5000);
      core.warning(`LLM request failed (attempt ${attempt}). Retrying in ${delay}ms...`);
      await new Promise(res => setTimeout(res, delay));
    }
  }
}
7 changes: 0 additions & 7 deletions src/core/llm/openaiClient.ts

This file was deleted.

4 changes: 2 additions & 2 deletions src/parser/extractTodos.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,8 @@ import { TodoItem } from './types';
import { normalizeTag } from '../utils/isTextFile';

const COMMENT_PATTERNS = [
{ ext: ['.ts', '.js', '.java', '.go'], pattern: /^\s*\/\/\s*(.*)$/ },
{ ext: ['.py', '.sh', '.rb'], pattern: /^\s*#\s*(.*)$/ },
{ ext: ['.ts', '.js', '.java', '.go', '.c', '.cpp', '.cs', '.rs', '.php', '.h', '.hpp'], pattern: /^\s*\/\/\s*(.*)$/ },
{ ext: ['.py', '.sh', '.rb', '.yaml', '.yml'], pattern: /^\s*#\s*(.*)$/ },
{ ext: ['.html', '.xml'], pattern: /<!--\s*(.*?)\s*-->/ }
];

Expand Down
4 changes: 2 additions & 2 deletions src/parser/extractTodosFromContent.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,8 @@ import { TodoItem } from './types';
import { normalizeTag } from '../utils/isTextFile';

const COMMENT_PATTERNS = [
{ ext: ['.ts', '.js', '.java', '.go'], pattern: /^\s*\/\/\s*(.*)$/ },
{ ext: ['.py', '.sh', '.rb'], pattern: /^\s*#\s*(.*)$/ },
{ ext: ['.ts', '.js', '.java', '.go', '.c', '.cpp', '.cs', '.rs', '.php', '.h', '.hpp'], pattern: /^\s*\/\/\s*(.*)$/ },
{ ext: ['.py', '.sh', '.rb', '.yaml', '.yml'], pattern: /^\s*#\s*(.*)$/ },
{ ext: ['.html', '.xml'], pattern: /<!--\s*(.*?)\s*-->/ }
];

Expand Down
2 changes: 1 addition & 1 deletion src/parser/extractTodosFromDir.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ import path from 'path';
import { extractTodosFromFile } from './extractTodos';
import { TodoItem } from './types';

const SUPPORTED_EXTENSIONS = ['.ts', '.js', '.py', '.go', '.java', '.rb', '.sh', '.html', '.xml'];
const SUPPORTED_EXTENSIONS = ['.ts', '.js', '.py', '.go', '.java', '.rb', '.sh', '.c', '.cpp', '.cs', '.rs', '.php', '.h', '.hpp', '.html', '.xml', '.yaml', '.yml'];
const IGNORED_DIRS = ['node_modules', 'dist', 'coverage'];

export function extractTodosFromDir(dirPath: string): TodoItem[] {
Expand Down
4 changes: 2 additions & 2 deletions src/utils/isTextFile.ts
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,8 @@
* Useful for filtering files before parsing for TODOs.
*/
export function isTextFile(filename: string): boolean {
return /\.(ts|js|jsx|tsx|py|rb|sh|go|java|html|css|json|md|txt|xml|yaml|yml)$/i.test(filename);
}
return /\.(ts|js|jsx|tsx|py|rb|sh|go|java|c|cpp|cs|rs|php|h|hpp|html|css|json|md|txt|xml|yaml|yml)$/i.test(filename);
}

/**
* List of known multilingual aliases for TODO-related tags.
Expand Down
8 changes: 4 additions & 4 deletions tests/commentPatterns.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,9 @@ import { describe, it, expect } from 'vitest';
import { extractTodosFromString } from '../src/parser/extractTodosFromContent';

describe('extractTodosFromString - comment support by extension', () => {
it('extracts from JS-style (//) for .js/.ts/.go/.java', () => {
it('extracts from JS-style (//) for many languages', () => {
const code = `// TODO: js comment\n// BUG: broken`;
const extensions = ['.js', '.ts', '.go', '.java'];
const extensions = ['.js', '.ts', '.go', '.java', '.c', '.cpp', '.rs'];

for (const ext of extensions) {
const todos = extractTodosFromString(code, ext);
Expand All @@ -14,9 +14,9 @@ describe('extractTodosFromString - comment support by extension', () => {
}
});

it('extracts from Python-style (#) for .py/.sh/.rb', () => {
it('extracts from Python-style (#) for .py/.sh/.rb/.yaml', () => {
const code = `# TODO: python comment\n# FIXME: fix me`;
const extensions = ['.py', '.sh', '.rb'];
const extensions = ['.py', '.sh', '.rb', '.yaml'];

for (const ext of extensions) {
const todos = extractTodosFromString(code, ext);
Expand Down
Loading
Loading