2 changes: 2 additions & 0 deletions __tests__/anthropic-senders/line-comments-sender.test.ts
@@ -1,5 +1,6 @@
 import { beforeEach, describe, expect, it, vi } from 'vitest'
 import { anthropicLineCommentsSender } from '../../src/senders/providers/anthropic/line-comments-sender.ts'
+import { _resetRuntimeConfigCacheForTests } from '../../src/core/utils/runtime-config.ts'
 
 // Mock the Anthropic SDK
 vi.mock('@anthropic-ai/sdk', () => {
@@ -127,6 +128,7 @@ describe('anthropicLineCommentsSender', () => {
 
   beforeEach(async () => {
     vi.clearAllMocks()
+    _resetRuntimeConfigCacheForTests()
 
     // Get the mocked Anthropic constructor
     const AnthropicConstructor = (await import('@anthropic-ai/sdk'))
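Both sender test suites (this one and the OpenAI one below) now clear the runtime-config cache in `beforeEach`. The runtime-config module itself is not part of this diff; the sketch below shows what these tests appear to assume — a module-level memo plus a test-only reset. Only the two exported names come from the diff; the internals and the `RuntimeConfig` fields here are assumptions.

```ts
// Sketch of src/core/utils/runtime-config.ts as the tests appear to assume
// it works; the module's real contents are not shown in this diff.
interface RuntimeConfig {
  llm: { provider: 'anthropic' | 'openai' }
}

let cached: RuntimeConfig | null = null

export async function getRuntimeConfig(): Promise<RuntimeConfig> {
  if (cached) return cached
  // Hypothetical resolution: env is read here, at runtime, not in getAppConfig
  const raw = process.env.LLM_PROVIDER?.toLowerCase()
  cached = {
    llm: { provider: raw === 'openai' ? 'openai' : 'anthropic' }
  }
  return cached
}

// Test-only reset: without it, the first test to resolve the config would pin
// its env-derived values for every later test in the same process.
export function _resetRuntimeConfigCacheForTests(): void {
  cached = null
}
```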
4 changes: 3 additions & 1 deletion __tests__/comment-handlers/discussion-handler.test.ts
@@ -114,6 +114,8 @@ describe('handleDiscussionReply', () => {
     expect(client.replyToReviewComment).toHaveBeenCalledTimes(1)
     const [[, , body]] = (client.replyToReviewComment as Mock).mock.calls
     expect(typeof body).toBe('string')
-    expect(body).toContain('I could not generate a confident, useful automated reply')
+    expect(body).toContain(
+      'I could not generate a confident, useful automated reply'
+    )
   })
 })
12 changes: 8 additions & 4 deletions __tests__/config-loader.test.ts
@@ -45,12 +45,14 @@ describe('getAppConfig / LLM provider resolution', () => {
     expect(cfg.llmProvider).toBe('anthropic')
   })
 
-  it('uses LLM_PROVIDER when config.json is missing', async () => {
+  it('ignores LLM_PROVIDER when config.json is missing (provider resolved at runtime layer)', async () => {
    process.env.LLM_PROVIDER = 'openai'
     vi.mocked(fs.readFile).mockRejectedValueOnce(new Error('no file'))
 
     const cfg = await getAppConfig()
-    expect(cfg.llmProvider).toBe('openai')
+    // getAppConfig now only reflects config.json + defaults; provider env
+    // resolution is handled in the runtime-config layer.
+    expect(cfg.llmProvider).toBe('anthropic')
   })
 
   it('prefers config.json llmProvider over env', async () => {
@@ -65,7 +67,7 @@
     expect(cfg.llmProvider).toBe('anthropic')
   })
 
-  it('falls back to env when config.json omits llmProvider', async () => {
+  it('does not fall back to env when config.json omits llmProvider (runtime layer handles env)', async () => {
     process.env.LLM_PROVIDER = 'openai'
 
     const fileCfg: Partial<RevuAppConfig> = {
@@ -74,7 +76,9 @@
     vi.mocked(fs.readFile).mockResolvedValueOnce(JSON.stringify(fileCfg))
 
     const cfg = await getAppConfig()
-    expect(cfg.llmProvider).toBe('openai')
+    // getAppConfig remains config.json + defaults; env-based provider
+    // fallback is handled by runtime-config.
+    expect(cfg.llmProvider).toBe('anthropic')
   })
 
   it('logs a warning and ignores invalid LLM_PROVIDER', async () => {
2 changes: 2 additions & 0 deletions __tests__/openai-senders/line-comments-sender.test.ts
@@ -1,5 +1,6 @@
 import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest'
 import { openaiLineCommentsSender } from '../../src/senders/providers/openai/line-comments-sender.ts'
+import { _resetRuntimeConfigCacheForTests } from '../../src/core/utils/runtime-config.ts'
 
 // Mock the OpenAI SDK
 const createMock = vi.fn()
@@ -17,6 +18,7 @@ describe('openaiLineCommentsSender', () => {
   beforeEach(async () => {
     vi.resetModules()
     createMock.mockReset()
+    _resetRuntimeConfigCacheForTests()
     // Ensure env is set for tests
     process.env.OPENAI_API_KEY = 'test-openai-key'
     delete process.env.OPENAI_MODEL
15 changes: 10 additions & 5 deletions src/comment-handlers/discussion-handler.ts
@@ -8,6 +8,7 @@ import {
   simpleHash
 } from '../utils/compute-cache.ts'
 import { logSystemError, logSystemWarning } from '../utils/logger.ts'
+import { getRuntimeConfig } from '../core/utils/runtime-config.ts'
 
 export interface ThreadMessage {
   author: string
@@ -113,11 +114,14 @@ export async function handleDiscussionReply(params: DiscussionHandlerParams) {
     ? userReplyBody.slice(0, MAX_REPLY_HASH_CHARS)
     : userReplyBody
   const bodyHash = simpleHash(truncatedForHash, 16)
-  const provider = process.env.LLM_PROVIDER || 'anthropic'
+
+  const runtime = await getRuntimeConfig()
+  const provider = runtime.llm.provider
   const modelForKey =
     provider === 'openai'
-      ? process.env.OPENAI_MODEL || 'gpt-5'
-      : process.env.ANTHROPIC_MODEL || 'claude-sonnet-4-5-20250929'
+      ? runtime.llm.openai.model
+      : runtime.llm.anthropic.model
+
   const cacheKey = buildDiscussionCacheKey({
     owner,
     repo,
@@ -194,7 +198,7 @@ export async function handleDiscussionReply(params: DiscussionHandlerParams) {
 
   let acquired = false
   if (hasLockSupport) {
-    const lockTtl = Number(process.env.DISCUSSION_LOCK_TTL_SECONDS || 240)
+    const lockTtl = runtime.discussion.lockTtlSeconds
    acquired = await cacheAny.tryAcquireLock(lockKey, lockTtl)
   }
 
@@ -282,7 +286,8 @@ export async function handleDiscussionReply(params: DiscussionHandlerParams) {
     userReplyBody: replyForPrompt,
     history,
     relevantFilePath,
-    diffHunk
+    diffHunk,
+    maxCharsPerFile: runtime.discussion.maxFileContentChars
   })
 
   // Generate assistant reply (concise)
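The handler now pulls every tunable from a single `runtime` object instead of `process.env`. The diff never shows the `RuntimeConfig` type, but the call sites above pin down the fields it must expose; here is a sketch of that implied shape. Only the accessed paths are grounded in the diff — everything else (defaults, exact types) is inferred.

```ts
// Shape implied by the accesses above: runtime.llm.provider,
// runtime.llm.openai.model, runtime.llm.anthropic.model,
// runtime.discussion.lockTtlSeconds, runtime.discussion.maxFileContentChars.
interface RuntimeConfig {
  llm: {
    provider: 'anthropic' | 'openai'
    openai: { model: string } // presumably defaulting to 'gpt-5'
    anthropic: { model: string } // presumably 'claude-sonnet-4-5-20250929'
  }
  discussion: {
    lockTtlSeconds: number // replaces DISCUSSION_LOCK_TTL_SECONDS (was 240)
    maxFileContentChars: number // new knob threaded into the prompt builder
  }
}
```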
36 changes: 0 additions & 36 deletions src/core/utils/config-loader.ts
@@ -2,7 +2,6 @@ import * as fs from 'fs/promises'
 import * as path from 'path'
 import { DEFAULT_APP_CONFIG, type RevuAppConfig } from '../../types/config.ts'
 import { logSystemWarning } from '../../utils/logger.ts'
-import { applyRevuAppEnvOverrides } from './env-config.ts'
 /**
  * Reads the application configuration from config.json
  */
@@ -23,50 +22,15 @@ export async function getAppConfig(): Promise<RevuAppConfig> {
       ...fileConfig
     }
 
-    applyRevuAppEnvOverrides(merged, fileConfig, [
-      {
-        key: 'llmProvider',
-        envVar: 'LLM_PROVIDER',
-        parse: (raw) => raw.toLowerCase() as RevuAppConfig['llmProvider'],
-        validate: (value) => value === 'anthropic' || value === 'openai',
-        onInvalid: (raw) => {
-          logSystemWarning(
-            new Error(
-              'Invalid LLM_PROVIDER env var, expected "anthropic" or "openai"'
-            ),
-            { context_msg: `value="${raw}"` }
-          )
-        }
-      }
-    ])
-
     cachedConfig = merged
     return cachedConfig
   } catch (error) {
     logSystemWarning('Failed to read config.json, using defaults:', error)
 
-    const fileConfig: Partial<RevuAppConfig> = {}
     const merged: RevuAppConfig = {
       ...DEFAULT_APP_CONFIG
     }
 
-    applyRevuAppEnvOverrides(merged, fileConfig, [
-      {
-        key: 'llmProvider',
-        envVar: 'LLM_PROVIDER',
-        parse: (raw) => raw.toLowerCase() as RevuAppConfig['llmProvider'],
-        validate: (value) => value === 'anthropic' || value === 'openai',
-        onInvalid: (raw) => {
-          logSystemWarning(
-            new Error(
-              'Invalid LLM_PROVIDER env var, expected "anthropic" or "openai"'
-            ),
-            { context_msg: `value="${raw}"` }
-          )
-        }
-      }
-    ])
-
     cachedConfig = merged
     return cachedConfig
   }
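With `applyRevuAppEnvOverrides` gone, `getAppConfig` reduces to config.json plus defaults, and the updated tests say env handling now lives in the runtime-config layer. That layer is not in this diff; the sketch below is a hypothetical re-hosting of the removed `LLM_PROVIDER` validation with the same warning semantics — the function name, signature, and import path are all assumptions.

```ts
// Hypothetical sketch only: the runtime-config side of this refactor is not
// shown in the diff. It mirrors the validation removed from config-loader.ts.
import { logSystemWarning } from '../../utils/logger.ts'

type LlmProvider = 'anthropic' | 'openai'

// fileValue: the provider as written in config.json, if any (before defaults)
function resolveProvider(fileValue: LlmProvider | undefined): LlmProvider {
  // config.json still wins when it names a provider (per the tests above)
  if (fileValue) return fileValue

  const raw = process.env.LLM_PROVIDER
  if (raw) {
    const value = raw.toLowerCase()
    if (value === 'anthropic' || value === 'openai') return value
    // Same warning the removed config-loader code emitted
    logSystemWarning(
      new Error(
        'Invalid LLM_PROVIDER env var, expected "anthropic" or "openai"'
      ),
      { context_msg: `value="${raw}"` }
    )
  }
  return 'anthropic' // default the tests above expect
}
```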