diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 2aab3ad..d5ea8c6 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -13,6 +13,8 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 + with: + fetch-depth: 0 - name: Set up Node.js uses: actions/setup-node@v4 @@ -24,6 +26,9 @@ jobs: - name: Run tests with coverage run: yarn vitest run --coverage + + - name: Check package version matches tag + run: yarn check-version # - name: Upload coverage to Codecov diff --git a/README.md b/README.md index 3025feb..9a74a6c 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ A GitHub Action that scans your codebase for inline TODOs, FIXMEs, and BUG comme ## πŸš€ Features - βœ… Detects `TODO`, `FIXME`, `BUG`, and `HACK` comments -- βœ… Supports multiple languages: `.ts`, `.js`, `.py`, `.go`, `.html`, etc. +- βœ… Supports many languages: `.ts`, `.js`, `.py`, `.go`, `.c`, `.cpp`, `.rs`, `.html`, `.yaml`, etc. - βœ… Skips common directories like `node_modules`, `dist`, and `coverage` - βœ… Extracts metadata like `priority`, `due`, etc. - βœ… Parses structured tags (`@assignee`, `#module`, `key=value`) @@ -17,6 +17,8 @@ A GitHub Action that scans your codebase for inline TODOs, FIXMEs, and BUG comme - βœ… Supports custom label colors and descriptions via JSON config - βœ… Custom templates for issue titles and bodies - βœ… LLM-powered issue title and body generation +- βœ… Automatic retry logic for OpenAI API calls +- βœ… Supports multiple LLM providers: OpenAI or Gemini - βœ… Command-line interface for local usage - βœ… Optional Jira synchronization @@ -55,8 +57,12 @@ jobs: with: repo-token: ${{ secrets.GITHUB_TOKEN }} limit: 5 + llm: true + llm-provider: openai # or 'gemini' ``` +Set `OPENAI_API_KEY` or `GEMINI_API_KEY` secrets based on your chosen provider. + ### 2. Run the CLI locally Use the bundled command-line interface to scan a directory on your machine and @@ -159,3 +165,12 @@ smart-todo-action/ β”œβ”€β”€ tsconfig.json └── README.md ``` + +## πŸ”– Versioning + +The `check-version` script ensures the `package.json` version matches the +current Git tag. 
It runs in CI and can be invoked locally with: + +```bash +yarn check-version +``` diff --git a/action.yml b/action.yml index faf433f..9747974 100644 --- a/action.yml +++ b/action.yml @@ -39,6 +39,11 @@ inputs: description: Use LLM to generate issue titles and bodies default: 'false' + llm-provider: + required: false + description: LLM provider to use (`openai` or `gemini`) + default: openai + openai-api-key: required: false description: 'OpenAI API key used when `llm` is true' @@ -48,6 +53,15 @@ inputs: description: OpenAI model to use (e.g., `gpt-3.5-turbo`, `gpt-4`) default: gpt-3.5-turbo + gemini-api-key: + required: false + description: Gemini API key used when `llm-provider` is `gemini` + + gemini-model: + required: false + description: Gemini model to use (e.g., `gemini-1.5-pro`) + default: gemini-1.5-pro + sync-to-jira: required: false default: 'false' diff --git a/package.json b/package.json index ea6e50c..649c408 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "smart-todo-action", - "version": "0.1.0", + "version": "1.0.0", "description": "GitHub Action inteligente para transformar TODOs em issues e tarefas rastreΓ‘veis.", "main": "dist/index.js", "bin": { @@ -11,7 +11,8 @@ "test": "vitest run", "prepare": "yarn ncc build src/ActionMain.ts -o dist", "changelog": "ts-node scripts/generateChangelog.ts", - "build:dist": "ncc build src/ActionMain.ts -o dist" + "build:dist": "ncc build src/ActionMain.ts -o dist", + "check-version": "ts-node scripts/checkVersionTag.ts" }, "keywords": [ "github-action", @@ -26,6 +27,7 @@ "dependencies": { "@actions/core": "^1.10.0", "@actions/github": "^6.0.1", + "@google/genai": "^1.4.0", "@jest/globals": "^29.7.0", "@octokit/rest": "^21.1.1", "@types/jest": "^29.5.14", diff --git a/scripts/checkVersionTag.ts b/scripts/checkVersionTag.ts new file mode 100644 index 0000000..87a43fc --- /dev/null +++ b/scripts/checkVersionTag.ts @@ -0,0 +1,32 @@ +import { readFileSync } from 'fs'; +import { execSync } from 'child_process'; + +const pkg = JSON.parse(readFileSync('package.json', 'utf8')); + +function getGitTag(): string | null { + const envTag = process.env.GITHUB_REF?.startsWith('refs/tags/') + ? process.env.GITHUB_REF.replace('refs/tags/', '') + : undefined; + if (envTag) return envTag; + try { + return execSync('git describe --tags --exact-match').toString().trim(); + } catch { + return null; + } +} + +const tag = getGitTag(); +if (!tag) { + console.log('No git tag found; skipping version check'); + process.exit(0); +} + +const normalizedTag = tag.startsWith('v') ? 
tag.slice(1) : tag; +if (pkg.version !== normalizedTag) { + console.error( + `Version mismatch: package.json is ${pkg.version} but tag is ${tag}` + ); + process.exit(1); +} + +console.log(`\u2714\ufe0f package.json version ${pkg.version} matches tag ${tag}`); diff --git a/src/ActionMain.ts b/src/ActionMain.ts index 96c961f..e539d0d 100644 --- a/src/ActionMain.ts +++ b/src/ActionMain.ts @@ -23,11 +23,20 @@ async function run(): Promise { const workspace = process.env.GITHUB_WORKSPACE || '.'; // LLM support - process.env.OPENAI_API_KEY = core.getInput('openai-api-key') || process.env.OPENAI_API_KEY; + const llmProvider = core.getInput('llm-provider') || 'openai'; + process.env.LLM_PROVIDER = llmProvider; + if (llmProvider === 'gemini') { + process.env.GEMINI_API_KEY = core.getInput('gemini-api-key') || process.env.GEMINI_API_KEY; + } else { + process.env.OPENAI_API_KEY = core.getInput('openai-api-key') || process.env.OPENAI_API_KEY; + } const useLLM = core.getInput('llm') === 'true'; - if (useLLM && !process.env.OPENAI_API_KEY) { + if (useLLM && llmProvider === 'openai' && !process.env.OPENAI_API_KEY) { core.warning('⚠️ LLM is enabled, but OPENAI_API_KEY is not set.'); } + if (useLLM && llmProvider === 'gemini' && !process.env.GEMINI_API_KEY) { + core.warning('⚠️ LLM is enabled, but GEMINI_API_KEY is not set.'); + } const useStructured = core.getInput('structured') === 'true'; diff --git a/src/core/llm/generateIssueContent.ts b/src/core/llm/generateIssueContent.ts index c73f489..d8a9614 100644 --- a/src/core/llm/generateIssueContent.ts +++ b/src/core/llm/generateIssueContent.ts @@ -1,13 +1,13 @@ // src/core/llm/generateIssueContent.ts import { TodoItem } from '../../parser/types'; -import OpenAI from 'openai'; import * as core from '@actions/core'; +import { chatCompletionWithRetry } from './llmClient'; -const openai = new OpenAI({ - apiKey: core.getInput('openai-api-key'), // correto agora - }); - -const model = core.getInput('openai-model') || 'gpt-3.5-turbo'; +const provider = core.getInput('llm-provider') || 'openai'; +const model = + provider === 'gemini' + ? 
core.getInput('gemini-model') || 'gemini-1.5-pro' + : core.getInput('openai-model') || 'gpt-3.5-turbo'; export async function generateIssueTitleAndBodyLLM(todo: TodoItem): Promise<{ title: string; body: string }> { const prompt = ` @@ -27,17 +27,20 @@ TITLE: BODY: <detailed body> `; - // πŸ‘‡ Adiciona aqui - core.debug(`[DEBUG] OpenAI key starts with: ${process.env.OPENAI_API_KEY?.slice(0, 5)}`); + core.debug(`[DEBUG] LLM provider: ${provider}`); + if (provider === 'openai') { + core.debug(`[DEBUG] OpenAI key starts with: ${process.env.OPENAI_API_KEY?.slice(0, 5)}`); + } else { + core.debug(`[DEBUG] Gemini key starts with: ${process.env.GEMINI_API_KEY?.slice(0, 5)}`); + } core.debug(`[DEBUG] Using model: ${model}`); - core.debug('[DEBUG] Sending prompt to OpenAI...'); + core.debug('[DEBUG] Sending prompt to LLM...'); try { - const response = await openai.chat.completions.create({ + const response = await chatCompletionWithRetry({ model, messages: [{ role: 'user', content: prompt }], temperature: 0.4, }); - // TODO(priority=high): improve retry logic for API errors const result = response.choices[0].message?.content || ''; const match = result.match(/TITLE:\s*(.+?)\s*BODY:\s*([\s\S]*)/i); @@ -48,7 +51,7 @@ try { const [, title, body] = match; return { title: title.trim(), body: body.trim() }; } catch (err: any) { - console.error('[ERROR] OpenAI call failed:', err); + console.error('[ERROR] LLM call failed:', err); throw err; } } diff --git a/src/core/llm/llmClient.ts b/src/core/llm/llmClient.ts new file mode 100644 index 0000000..b0d7cc3 --- /dev/null +++ b/src/core/llm/llmClient.ts @@ -0,0 +1,54 @@ +// src/core/llm/openaiClient.ts +import OpenAI from 'openai'; +import { GoogleGenAI } from '@google/genai'; +import * as core from '@actions/core'; + +const provider = core.getInput('llm-provider') || 'openai'; + +export const openai = new OpenAI({ + apiKey: core.getInput('openai-api-key'), +}); + +export const gemini = new GoogleGenAI({ + apiKey: core.getInput('gemini-api-key'), +}); + +/** + * Wraps `openai.chat.completions.create` with simple retry logic. + * Retries on failure with exponential backoff. + * + * @param params Parameters forwarded to OpenAI + * @param maxRetries Maximum number of attempts before throwing + */ +export interface ChatCompletionParams { + model: string; + messages: { role: string; content: string }[]; + temperature?: number; +} + +export async function chatCompletionWithRetry( + params: ChatCompletionParams, + maxRetries = 3 +): Promise<{ choices: { message: { content: string } }[] }> { + let attempt = 0; + for (;;) { + try { + if (provider === 'gemini') { + const prompt = params.messages.map(m => m.content).join('\n'); + const response = await gemini.models.generateContent({ + model: params.model, + contents: prompt, + generationConfig: { temperature: params.temperature }, + } as any); + return { choices: [{ message: { content: (response as any).text } }] } as any; + } + return (await openai.chat.completions.create(params as any)) as any; + } catch (err) { + attempt++; + if (attempt > maxRetries) throw err; + const delay = Math.min(1000 * 2 ** attempt, 5000); + core.warning(`LLM request failed (attempt ${attempt}). 
Retrying in ${delay}ms...`); + await new Promise(res => setTimeout(res, delay)); + } + } +} diff --git a/src/core/llm/openaiClient.ts b/src/core/llm/openaiClient.ts deleted file mode 100644 index cb4b4d9..0000000 --- a/src/core/llm/openaiClient.ts +++ /dev/null @@ -1,7 +0,0 @@ -// src/core/llm/openaiClient.ts -import OpenAI from 'openai'; -import * as core from '@actions/core'; - -export const openai = new OpenAI({ - apiKey: core.getInput('openai-api-key'), -}); diff --git a/src/parser/extractTodos.ts b/src/parser/extractTodos.ts index 9acd06c..e6490fd 100644 --- a/src/parser/extractTodos.ts +++ b/src/parser/extractTodos.ts @@ -4,8 +4,8 @@ import { TodoItem } from './types'; import { normalizeTag } from '../utils/isTextFile'; const COMMENT_PATTERNS = [ - { ext: ['.ts', '.js', '.java', '.go'], pattern: /^\s*\/\/\s*(.*)$/ }, - { ext: ['.py', '.sh', '.rb'], pattern: /^\s*#\s*(.*)$/ }, + { ext: ['.ts', '.js', '.java', '.go', '.c', '.cpp', '.cs', '.rs', '.php', '.h', '.hpp'], pattern: /^\s*\/\/\s*(.*)$/ }, + { ext: ['.py', '.sh', '.rb', '.yaml', '.yml'], pattern: /^\s*#\s*(.*)$/ }, { ext: ['.html', '.xml'], pattern: /<!--\s*(.*?)\s*-->/ } ]; diff --git a/src/parser/extractTodosFromContent.ts b/src/parser/extractTodosFromContent.ts index f3e7b49..0560373 100644 --- a/src/parser/extractTodosFromContent.ts +++ b/src/parser/extractTodosFromContent.ts @@ -4,8 +4,8 @@ import { TodoItem } from './types'; import { normalizeTag } from '../utils/isTextFile'; const COMMENT_PATTERNS = [ - { ext: ['.ts', '.js', '.java', '.go'], pattern: /^\s*\/\/\s*(.*)$/ }, - { ext: ['.py', '.sh', '.rb'], pattern: /^\s*#\s*(.*)$/ }, + { ext: ['.ts', '.js', '.java', '.go', '.c', '.cpp', '.cs', '.rs', '.php', '.h', '.hpp'], pattern: /^\s*\/\/\s*(.*)$/ }, + { ext: ['.py', '.sh', '.rb', '.yaml', '.yml'], pattern: /^\s*#\s*(.*)$/ }, { ext: ['.html', '.xml'], pattern: /<!--\s*(.*?)\s*-->/ } ]; diff --git a/src/parser/extractTodosFromDir.ts b/src/parser/extractTodosFromDir.ts index 0fa248b..f0ca4fb 100644 --- a/src/parser/extractTodosFromDir.ts +++ b/src/parser/extractTodosFromDir.ts @@ -3,7 +3,7 @@ import path from 'path'; import { extractTodosFromFile } from './extractTodos'; import { TodoItem } from './types'; -const SUPPORTED_EXTENSIONS = ['.ts', '.js', '.py', '.go', '.java', '.rb', '.sh', '.html', '.xml']; +const SUPPORTED_EXTENSIONS = ['.ts', '.js', '.py', '.go', '.java', '.rb', '.sh', '.c', '.cpp', '.cs', '.rs', '.php', '.h', '.hpp', '.html', '.xml', '.yaml', '.yml']; const IGNORED_DIRS = ['node_modules', 'dist', 'coverage']; export function extractTodosFromDir(dirPath: string): TodoItem[] { diff --git a/src/utils/isTextFile.ts b/src/utils/isTextFile.ts index 26f69d7..4ac465c 100644 --- a/src/utils/isTextFile.ts +++ b/src/utils/isTextFile.ts @@ -5,8 +5,8 @@ * Useful for filtering files before parsing for TODOs. */ export function isTextFile(filename: string): boolean { - return /\.(ts|js|jsx|tsx|py|rb|sh|go|java|html|css|json|md|txt|xml|yaml|yml)$/i.test(filename); - } + return /\.(ts|js|jsx|tsx|py|rb|sh|go|java|c|cpp|cs|rs|php|h|hpp|html|css|json|md|txt|xml|yaml|yml)$/i.test(filename); +} /** * List of known multilingual aliases for TODO-related tags. 
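For readers reviewing the retry behaviour above: `chatCompletionWithRetry` doubles the wait after each failed attempt and caps it at 5 seconds. A minimal sketch of the resulting schedule (illustrative only, not part of the patch; `backoffDelays` is a hypothetical helper):

```ts
// Illustrative sketch only, not part of the patch.
// Mirrors the backoff in src/core/llm/llmClient.ts:
//   delay = Math.min(1000 * 2 ** attempt, 5000)
function backoffDelays(maxRetries: number): number[] {
  const delays: number[] = [];
  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    delays.push(Math.min(1000 * 2 ** attempt, 5000));
  }
  return delays;
}

// With the default maxRetries = 3, a persistently failing request waits
// 2s, then 4s, then 5s before the error is rethrown to the caller.
console.log(backoffDelays(3)); // [2000, 4000, 5000]
```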
diff --git a/tests/commentPatterns.test.ts b/tests/commentPatterns.test.ts index 408f1f3..e16a534 100644 --- a/tests/commentPatterns.test.ts +++ b/tests/commentPatterns.test.ts @@ -2,9 +2,9 @@ import { describe, it, expect } from 'vitest'; import { extractTodosFromString } from '../src/parser/extractTodosFromContent'; describe('extractTodosFromString - comment support by extension', () => { - it('extracts from JS-style (//) for .js/.ts/.go/.java', () => { + it('extracts from JS-style (//) for many languages', () => { const code = `// TODO: js comment\n// BUG: broken`; - const extensions = ['.js', '.ts', '.go', '.java']; + const extensions = ['.js', '.ts', '.go', '.java', '.c', '.cpp', '.rs']; for (const ext of extensions) { const todos = extractTodosFromString(code, ext); @@ -14,9 +14,9 @@ describe('extractTodosFromString - comment support by extension', () => { } }); - it('extracts from Python-style (#) for .py/.sh/.rb', () => { + it('extracts from Python-style (#) for .py/.sh/.rb/.yaml', () => { const code = `# TODO: python comment\n# FIXME: fix me`; - const extensions = ['.py', '.sh', '.rb']; + const extensions = ['.py', '.sh', '.rb', '.yaml']; for (const ext of extensions) { const todos = extractTodosFromString(code, ext); diff --git a/tests/generateIssueContent.test.ts b/tests/generateIssueContent.test.ts index 24a4e7a..d89b0d8 100644 --- a/tests/generateIssueContent.test.ts +++ b/tests/generateIssueContent.test.ts @@ -1,36 +1,37 @@ // tests/generateIssueContent.test.ts import { describe, it, expect, vi, beforeEach } from 'vitest'; -import * as core from '@actions/core'; import OpenAI from 'openai'; -import { generateIssueTitleAndBodyLLM } from '../src/core/llm/generateIssueContent'; + +async function loadGenerator(provider: string) { + vi.resetModules(); + const coreModule = await import('@actions/core'); + (coreModule.getInput as any) = vi.fn((key: string) => { + if (key === 'llm-provider') return provider; + if (key === 'openai-api-key') return 'fake-key'; + if (key === 'openai-model') return 'gpt-3.5-turbo'; + if (key === 'gemini-model') return 'gemini-1.5-pro'; + return ''; + }); + return await import('../src/core/llm/generateIssueContent'); +} import { TodoItem } from '../src/parser/types'; vi.mock('@actions/core'); vi.mock('openai'); +vi.mock('@google/genai'); describe('generateIssueTitleAndBodyLLM', () => { - const mockCore = core as unknown as { getInput: (key: string) => string }; const mockCreate = vi.fn(); beforeEach(() => { vi.clearAllMocks(); - - // Mock `core.getInput` - (mockCore.getInput as any) = vi.fn((key: string) => { - if (key === 'openai-api-key') return 'fake-key'; - if (key === 'openai-model') return 'gpt-3.5-turbo'; - return ''; - }); - - // Mock `openai.chat.completions.create` (OpenAI as unknown as any).prototype.chat = { - completions: { - create: mockCreate, - }, + completions: { create: mockCreate }, }; }); it('should return title and body when OpenAI responds correctly', async () => { + const { generateIssueTitleAndBodyLLM } = await loadGenerator('openai'); const todo: TodoItem = { tag: 'TODO', text: 'Refactor this function', @@ -58,6 +59,7 @@ describe('generateIssueTitleAndBodyLLM', () => { }); it('should throw if response format is invalid', async () => { + const { generateIssueTitleAndBodyLLM } = await loadGenerator('openai'); const todo: TodoItem = { tag: 'TODO', text: 'Missing format', @@ -78,6 +80,9 @@ describe('generateIssueTitleAndBodyLLM', () => { }); it('should throw if OpenAI call fails', async () => { + const { generateIssueTitleAndBodyLLM } 
= await loadGenerator('openai'); + vi.useFakeTimers(); + const todo: TodoItem = { tag: 'TODO', text: 'Handle error', @@ -88,7 +93,29 @@ describe('generateIssueTitleAndBodyLLM', () => { mockCreate.mockRejectedValue(new Error('API failure')); - await expect(() => generateIssueTitleAndBodyLLM(todo)).rejects.toThrow('API failure'); + const promise = generateIssueTitleAndBodyLLM(todo); + const expectation = expect(promise).rejects.toThrow('API failure'); + await vi.runAllTimersAsync(); + await expectation; + vi.useRealTimers(); + }); + + it('should work with Gemini provider', async () => { + const { generateIssueTitleAndBodyLLM } = await loadGenerator('gemini'); + const todo: TodoItem = { + tag: 'TODO', + text: 'Gemini task', + file: 'src/file.ts', + line: 5, + metadata: {}, + }; + + (await import('@google/genai')).GoogleGenAI.prototype.models = { + generateContent: vi.fn().mockResolvedValue({ text: 'TITLE: G\nBODY: B' }), + } as any; + + const result = await generateIssueTitleAndBodyLLM(todo); + expect(result).toEqual({ title: 'G', body: 'B' }); }); }); diff --git a/tests/isTextFile.test.ts b/tests/isTextFile.test.ts new file mode 100644 index 0000000..6cc0d3e --- /dev/null +++ b/tests/isTextFile.test.ts @@ -0,0 +1,15 @@ +import { describe, it, expect } from 'vitest'; +import { isTextFile } from '../src/utils/isTextFile'; + +describe('isTextFile', () => { + it('returns true for supported extensions', () => { + const files = ['main.c', 'module.cpp', 'program.rs', 'config.yaml']; + for (const f of files) { + expect(isTextFile(f)).toBe(true); + } + }); + + it('returns false for unsupported extensions', () => { + expect(isTextFile('image.png')).toBe(false); + }); +}); diff --git a/tests/llmClient.test.ts b/tests/llmClient.test.ts new file mode 100644 index 0000000..5e69ea8 --- /dev/null +++ b/tests/llmClient.test.ts @@ -0,0 +1,45 @@ +import { describe, it, expect, vi } from 'vitest'; + +vi.mock('@google/genai'); +vi.mock('openai'); + +async function loadClient(provider: string) { + vi.resetModules(); + const core = await import('@actions/core'); + (core.getInput as any) = vi.fn((key: string) => (key === 'llm-provider' ? 
provider : '')); + return await import('../src/core/llm/llmClient'); +} + +describe('chatCompletionWithRetry', () => { + it('retries and succeeds with OpenAI', async () => { + vi.useFakeTimers(); + const { openai, chatCompletionWithRetry } = await loadClient('openai'); + const mockCreate = vi.fn() + .mockRejectedValueOnce(new Error('fail')) + .mockResolvedValue({ choices: [{ message: { content: 'ok' } }] }); + (openai as any).chat = { completions: { create: mockCreate } }; + + const promise = chatCompletionWithRetry({ model: 'gpt', messages: [] }, 2); + await vi.runAllTimersAsync(); + const result = await promise; + expect(mockCreate).toHaveBeenCalledTimes(2); + expect(result.choices[0].message.content).toBe('ok'); + vi.useRealTimers(); + }); + + it('retries and succeeds with Gemini', async () => { + vi.useFakeTimers(); + const { gemini, chatCompletionWithRetry } = await loadClient('gemini'); + const mockGen = vi.fn() + .mockRejectedValueOnce(new Error('fail')) + .mockResolvedValue({ text: 'ok' }); + (gemini as any).models = { generateContent: mockGen }; + + const promise = chatCompletionWithRetry({ model: 'any', messages: [{ role: 'user', content: 'hi' }] }, 2); + await vi.runAllTimersAsync(); + const result = await promise; + expect(mockGen).toHaveBeenCalledTimes(2); + expect(result.choices[0].message.content).toBe('ok'); + vi.useRealTimers(); + }); +}); diff --git a/yarn.lock b/yarn.lock index 028ffe0..56b1dcc 100644 --- a/yarn.lock +++ b/yarn.lock @@ -626,6 +626,20 @@ __metadata: languageName: node linkType: hard +"@google/genai@npm:^1.4.0": + version: 1.4.0 + resolution: "@google/genai@npm:1.4.0" + dependencies: + google-auth-library: "npm:^9.14.2" + ws: "npm:^8.18.0" + zod: "npm:^3.22.4" + zod-to-json-schema: "npm:^3.22.4" + peerDependencies: + "@modelcontextprotocol/sdk": ^1.11.0 + checksum: 10c0/682bc46c7af940fed178bfb13d39ca210910ab3aa524eb51cace7122b7590b2cfe8b9229570e8eca47729cfb5b82e5a0619c3595431ab1f354c93c8b2f148309 + languageName: node + linkType: hard + "@isaacs/cliui@npm:^8.0.2": version: 8.0.2 resolution: "@isaacs/cliui@npm:8.0.2" @@ -1682,6 +1696,13 @@ __metadata: languageName: node linkType: hard +"base64-js@npm:^1.3.0": + version: 1.5.1 + resolution: "base64-js@npm:1.5.1" + checksum: 10c0/f23823513b63173a001030fae4f2dabe283b99a9d324ade3ad3d148e218134676f1ee8568c877cd79ec1c53158dcf2d2ba527a97c606618928ba99dd930102bf + languageName: node + linkType: hard + "before-after-hook@npm:^2.2.0": version: 2.2.3 resolution: "before-after-hook@npm:2.2.3" @@ -1696,6 +1717,13 @@ __metadata: languageName: node linkType: hard +"bignumber.js@npm:^9.0.0": + version: 9.3.0 + resolution: "bignumber.js@npm:9.3.0" + checksum: 10c0/f54a79cd6fc98552ac0510c1cd9381650870ae443bdb20ba9b98e3548188d941506ac3c22a9f9c69b2cc60da9be5700e87d3f54d2825310a8b2ae999dfd6d99d + languageName: node + linkType: hard + "brace-expansion@npm:^1.1.7": version: 1.1.11 resolution: "brace-expansion@npm:1.1.11" @@ -1747,6 +1775,13 @@ __metadata: languageName: node linkType: hard +"buffer-equal-constant-time@npm:^1.0.1": + version: 1.0.1 + resolution: "buffer-equal-constant-time@npm:1.0.1" + checksum: 10c0/fb2294e64d23c573d0dd1f1e7a466c3e978fe94a4e0f8183937912ca374619773bef8e2aceb854129d2efecbbc515bbd0cc78d2734a3e3031edb0888531bbc8e + languageName: node + linkType: hard + "cac@npm:^6.7.14": version: 6.7.14 resolution: "cac@npm:6.7.14" @@ -1983,6 +2018,15 @@ __metadata: languageName: node linkType: hard +"ecdsa-sig-formatter@npm:1.0.11, ecdsa-sig-formatter@npm:^1.0.11": + version: 1.0.11 + resolution: 
"ecdsa-sig-formatter@npm:1.0.11" + dependencies: + safe-buffer: "npm:^5.0.1" + checksum: 10c0/ebfbf19d4b8be938f4dd4a83b8788385da353d63307ede301a9252f9f7f88672e76f2191618fd8edfc2f24679236064176fab0b78131b161ee73daa37125408c + languageName: node + linkType: hard + "electron-to-chromium@npm:^1.5.73": version: 1.5.138 resolution: "electron-to-chromium@npm:1.5.138" @@ -2222,6 +2266,13 @@ __metadata: languageName: node linkType: hard +"extend@npm:^3.0.2": + version: 3.0.2 + resolution: "extend@npm:3.0.2" + checksum: 10c0/73bf6e27406e80aa3e85b0d1c4fd987261e628064e170ca781125c0b635a3dabad5e05adbf07595ea0cf1e6c5396cacb214af933da7cbaf24fe75ff14818e8f9 + languageName: node + linkType: hard + "fast-content-type-parse@npm:^2.0.0": version: 2.0.1 resolution: "fast-content-type-parse@npm:2.0.1" @@ -2388,6 +2439,30 @@ __metadata: languageName: node linkType: hard +"gaxios@npm:^6.0.0, gaxios@npm:^6.1.1": + version: 6.7.1 + resolution: "gaxios@npm:6.7.1" + dependencies: + extend: "npm:^3.0.2" + https-proxy-agent: "npm:^7.0.1" + is-stream: "npm:^2.0.0" + node-fetch: "npm:^2.6.9" + uuid: "npm:^9.0.1" + checksum: 10c0/53e92088470661c5bc493a1de29d05aff58b1f0009ec5e7903f730f892c3642a93e264e61904383741ccbab1ce6e519f12a985bba91e13527678b32ee6d7d3fd + languageName: node + linkType: hard + +"gcp-metadata@npm:^6.1.0": + version: 6.1.1 + resolution: "gcp-metadata@npm:6.1.1" + dependencies: + gaxios: "npm:^6.1.1" + google-logging-utils: "npm:^0.0.2" + json-bigint: "npm:^1.0.0" + checksum: 10c0/71f6ad4800aa622c246ceec3955014c0c78cdcfe025971f9558b9379f4019f5e65772763428ee8c3244fa81b8631977316eaa71a823493f82e5c44d7259ffac8 + languageName: node + linkType: hard + "gensync@npm:^1.0.0-beta.2": version: 1.0.0-beta.2 resolution: "gensync@npm:1.0.0-beta.2" @@ -2467,6 +2542,27 @@ __metadata: languageName: node linkType: hard +"google-auth-library@npm:^9.14.2": + version: 9.15.1 + resolution: "google-auth-library@npm:9.15.1" + dependencies: + base64-js: "npm:^1.3.0" + ecdsa-sig-formatter: "npm:^1.0.11" + gaxios: "npm:^6.1.1" + gcp-metadata: "npm:^6.1.0" + gtoken: "npm:^7.0.0" + jws: "npm:^4.0.0" + checksum: 10c0/6eef36d9a9cb7decd11e920ee892579261c6390104b3b24d3e0f3889096673189fe2ed0ee43fd563710e2560de98e63ad5aa4967b91e7f4e69074a422d5f7b65 + languageName: node + linkType: hard + +"google-logging-utils@npm:^0.0.2": + version: 0.0.2 + resolution: "google-logging-utils@npm:0.0.2" + checksum: 10c0/9a4bbd470dd101c77405e450fffca8592d1d7114f245a121288d04a957aca08c9dea2dd1a871effe71e41540d1bb0494731a0b0f6fea4358e77f06645e4268c1 + languageName: node + linkType: hard + "gopd@npm:^1.2.0": version: 1.2.0 resolution: "gopd@npm:1.2.0" @@ -2481,6 +2577,16 @@ __metadata: languageName: node linkType: hard +"gtoken@npm:^7.0.0": + version: 7.1.0 + resolution: "gtoken@npm:7.1.0" + dependencies: + gaxios: "npm:^6.0.0" + jws: "npm:^4.0.0" + checksum: 10c0/0a3dcacb1a3c4578abe1ee01c7d0bf20bffe8ded3ee73fc58885d53c00f6eb43b4e1372ff179f0da3ed5cfebd5b7c6ab8ae2776f1787e90d943691b4fe57c716 + languageName: node + linkType: hard + "has-flag@npm:^4.0.0": version: 4.0.0 resolution: "has-flag@npm:4.0.0" @@ -2613,6 +2719,13 @@ __metadata: languageName: node linkType: hard +"is-stream@npm:^2.0.0": + version: 2.0.1 + resolution: "is-stream@npm:2.0.1" + checksum: 10c0/7c284241313fc6efc329b8d7f08e16c0efeb6baab1b4cd0ba579eb78e5af1aa5da11e68559896a2067cd6c526bd29241dda4eb1225e627d5aa1a89a76d4635a5 + languageName: node + linkType: hard + "isexe@npm:^2.0.0": version: 2.0.0 resolution: "isexe@npm:2.0.0" @@ -2870,6 +2983,15 @@ __metadata: languageName: node linkType: hard 
+"json-bigint@npm:^1.0.0": + version: 1.0.0 + resolution: "json-bigint@npm:1.0.0" + dependencies: + bignumber.js: "npm:^9.0.0" + checksum: 10c0/e3f34e43be3284b573ea150a3890c92f06d54d8ded72894556357946aeed9877fd795f62f37fe16509af189fd314ab1104d0fd0f163746ad231b9f378f5b33f4 + languageName: node + linkType: hard + "json5@npm:^2.2.3": version: 2.2.3 resolution: "json5@npm:2.2.3" @@ -2879,6 +3001,27 @@ __metadata: languageName: node linkType: hard +"jwa@npm:^2.0.0": + version: 2.0.1 + resolution: "jwa@npm:2.0.1" + dependencies: + buffer-equal-constant-time: "npm:^1.0.1" + ecdsa-sig-formatter: "npm:1.0.11" + safe-buffer: "npm:^5.0.1" + checksum: 10c0/ab3ebc6598e10dc11419d4ed675c9ca714a387481466b10e8a6f3f65d8d9c9237e2826f2505280a739cf4cbcf511cb288eeec22b5c9c63286fc5a2e4f97e78cf + languageName: node + linkType: hard + +"jws@npm:^4.0.0": + version: 4.0.0 + resolution: "jws@npm:4.0.0" + dependencies: + jwa: "npm:^2.0.0" + safe-buffer: "npm:^5.0.1" + checksum: 10c0/f1ca77ea5451e8dc5ee219cb7053b8a4f1254a79cb22417a2e1043c1eb8a569ae118c68f24d72a589e8a3dd1824697f47d6bd4fb4bebb93a3bdf53545e721661 + languageName: node + linkType: hard + "locate-path@npm:^5.0.0": version: 5.0.0 resolution: "locate-path@npm:5.0.0" @@ -3155,7 +3298,7 @@ __metadata: languageName: node linkType: hard -"node-fetch@npm:^2.6.7": +"node-fetch@npm:^2.6.7, node-fetch@npm:^2.6.9": version: 2.7.0 resolution: "node-fetch@npm:2.7.0" dependencies: @@ -3513,6 +3656,13 @@ __metadata: languageName: node linkType: hard +"safe-buffer@npm:^5.0.1": + version: 5.2.1 + resolution: "safe-buffer@npm:5.2.1" + checksum: 10c0/6501914237c0a86e9675d4e51d89ca3c21ffd6a31642efeba25ad65720bce6921c9e7e974e5be91a786b25aa058b5303285d3c15dbabf983a919f5f630d349f3 + languageName: node + linkType: hard + "safer-buffer@npm:>= 2.1.2 < 3.0.0": version: 2.1.2 resolution: "safer-buffer@npm:2.1.2" @@ -3604,6 +3754,7 @@ __metadata: dependencies: "@actions/core": "npm:^1.10.0" "@actions/github": "npm:^6.0.1" + "@google/genai": "npm:^1.4.0" "@jest/globals": "npm:^29.7.0" "@octokit/rest": "npm:^21.1.1" "@types/jest": "npm:^29.5.14" @@ -4007,6 +4158,15 @@ __metadata: languageName: node linkType: hard +"uuid@npm:^9.0.1": + version: 9.0.1 + resolution: "uuid@npm:9.0.1" + bin: + uuid: dist/bin/uuid + checksum: 10c0/1607dd32ac7fc22f2d8f77051e6a64845c9bce5cd3dd8aa0070c074ec73e666a1f63c7b4e0f4bf2bc8b9d59dc85a15e17807446d9d2b17c8485fbc2147b27f9b + languageName: node + linkType: hard + "v8-compile-cache-lib@npm:^3.0.1": version: 3.0.1 resolution: "v8-compile-cache-lib@npm:3.0.1" @@ -4250,6 +4410,21 @@ __metadata: languageName: node linkType: hard +"ws@npm:^8.18.0": + version: 8.18.2 + resolution: "ws@npm:8.18.2" + peerDependencies: + bufferutil: ^4.0.1 + utf-8-validate: ">=5.0.2" + peerDependenciesMeta: + bufferutil: + optional: true + utf-8-validate: + optional: true + checksum: 10c0/4b50f67931b8c6943c893f59c524f0e4905bbd183016cfb0f2b8653aa7f28dad4e456b9d99d285bbb67cca4fedd9ce90dfdfaa82b898a11414ebd66ee99141e4 + languageName: node + linkType: hard + "yallist@npm:^3.0.2": version: 3.1.1 resolution: "yallist@npm:3.1.1" @@ -4277,3 +4452,19 @@ __metadata: checksum: 10c0/0732468dd7622ed8a274f640f191f3eaf1f39d5349a1b72836df484998d7d9807fbea094e2f5486d6b0cd2414aad5775972df0e68f8604db89a239f0f4bf7443 languageName: node linkType: hard + +"zod-to-json-schema@npm:^3.22.4": + version: 3.24.5 + resolution: "zod-to-json-schema@npm:3.24.5" + peerDependencies: + zod: ^3.24.1 + checksum: 
10c0/0745b94ba53e652d39f262641cdeb2f75d24339fb6076a38ce55bcf53d82dfaea63adf524ebc5f658681005401687f8e9551c4feca7c4c882e123e66091dfb90 + languageName: node + linkType: hard + +"zod@npm:^3.22.4": + version: 3.25.62 + resolution: "zod@npm:3.25.62" + checksum: 10c0/a98e53eddd689913a1d476ccfd3fd3ffbdf56c8e8606449445e4a36f31064551760c930c62381420605394e71b00d3397ea456637bacc7d2afe7c4946bf9c6b0 + languageName: node + linkType: hard
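
A note on the release flow introduced above: `scripts/checkVersionTag.ts` strips a leading `v` from the tag before comparing it with `package.json`, so tagging this release as either `v1.0.0` or `1.0.0` passes the new CI step. A minimal sketch of that comparison (illustrative only; `tagMatchesVersion` is a hypothetical helper, while the real script reads the tag from `GITHUB_REF` or `git describe`):

```ts
// Illustrative sketch only, not part of the patch.
// Mirrors the normalization in scripts/checkVersionTag.ts.
function tagMatchesVersion(tag: string, version: string): boolean {
  const normalizedTag = tag.startsWith('v') ? tag.slice(1) : tag;
  return normalizedTag === version;
}

console.log(tagMatchesVersion('v1.0.0', '1.0.0')); // true
console.log(tagMatchesVersion('1.0.0', '1.0.0'));  // true
console.log(tagMatchesVersion('v0.1.0', '1.0.0')); // false -> the CI step exits 1
```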