13 changes: 13 additions & 0 deletions src/config/index.mjs
@@ -65,6 +65,10 @@ export const chatgptApiModelKeys = [
'chatgptApi4_1',
'chatgptApi4_1_mini',
'chatgptApi4_1_nano',
'chatgptApiO4Mini',
'chatgptApiGpt5',
'chatgptApiGpt5Mini',
'chatgptApiGpt5Nano',
]
export const customApiModelKeys = ['customModel']
export const ollamaApiModelKeys = ['ollamaModel']
@@ -256,6 +260,11 @@ export const Models = {
chatgptApi4_1_mini: { value: 'gpt-4.1-mini', desc: 'ChatGPT (GPT-4.1 mini)' },
chatgptApi4_1_nano: { value: 'gpt-4.1-nano', desc: 'ChatGPT (GPT-4.1 nano)' },

chatgptApiO4Mini: { value: 'o4-mini', desc: 'ChatGPT (o4-mini)' },
chatgptApiGpt5: { value: 'gpt-5', desc: 'ChatGPT (gpt-5)' },
chatgptApiGpt5Mini: { value: 'gpt-5-mini', desc: 'ChatGPT (gpt-5-mini)' },
chatgptApiGpt5Nano: { value: 'gpt-5-nano', desc: 'ChatGPT (gpt-5-nano)' },

claude2WebFree: { value: '', desc: 'Claude.ai (Web)' },
claude12Api: { value: 'claude-instant-1.2', desc: 'Claude.ai (API, Claude Instant 1.2)' },
claude2Api: { value: 'claude-2.0', desc: 'Claude.ai (API, Claude 2)' },
@@ -541,6 +550,10 @@ export const defaultConfig = {
'openRouter_anthropic_claude_sonnet4',
'openRouter_google_gemini_2_5_pro',
'openRouter_openai_o3',
'chatgptApiO4Mini',
'chatgptApiGpt5',
'chatgptApiGpt5Mini',
'chatgptApiGpt5Nano',
],
customApiModes: [
{
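Note: each key added above maps a wire-format model name (value) to a display label (desc), and the same keys are appended to the default active-model list in defaultConfig. A minimal sketch of the lookup these entries feed into (values taken from this diff; the picker usage of desc is an assumption):

  import { Models } from './src/config/index.mjs'

  const key = 'chatgptApiGpt5Mini'
  Models[key].value // 'gpt-5-mini' — sent as the request's `model` field
  Models[key].desc  // 'ChatGPT (gpt-5-mini)' — label, presumably shown in the model picker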
176 changes: 149 additions & 27 deletions src/services/apis/openai-api.mjs
@@ -5,7 +5,38 @@ import { fetchSSE } from '../../utils/fetch-sse.mjs'
import { getConversationPairs } from '../../utils/get-conversation-pairs.mjs'
import { isEmpty } from 'lodash-es'
import { getCompletionPromptBase, pushRecord, setAbortController } from './shared.mjs'
import { getModelValue } from '../../utils/model-name-convert.mjs'
import { getModelValue, isUsingReasoningModel } from '../../utils/model-name-convert.mjs'

/**
* Extract content from structured response arrays for reasoning models
* @param {Array} contentArray - Array of content segments
* @returns {string} - Extracted text content
*/
function extractContentFromArray(contentArray) {
if (!Array.isArray(contentArray)) {
console.debug('Content is not an array, returning empty string')
return ''
}

try {
const parts = contentArray
.map((part) => {
if (typeof part === 'string') return part
if (part && typeof part === 'object') {
// Prefer output_text segments; fallback to text property
if (typeof part.output_text === 'string') return part.output_text
if (typeof part.text === 'string') return part.text
}
return ''
})
.filter(Boolean)

return parts.join('')
} catch (error) {
console.error('Error extracting content from array:', error)
return ''
}
}
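
Note: a quick illustration of the helper's contract — strings pass through, object segments prefer output_text over text, and everything else is dropped. The input below is an invented payload, not a real API response:

  // Invented payload: mixed segment shapes collapse into one string.
  extractContentFromArray([
    'plain, ',
    { output_text: 'preferred, ' }, // output_text wins over text
    { text: 'fallback' },
    { type: 'reasoning' }, // no text fields — filtered out
    42, // neither string nor object — filtered out
  ]) // => 'plain, preferred, fallback'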

/**
* @param {Browser.Runtime.Port} port
@@ -65,10 +96,16 @@ export async function generateAnswersWithGptCompletionApi(port, question, sessio
return
}

answer += data.choices[0].text
const choice = data.choices?.[0]
if (!choice) {
console.debug('No choice in response data')
return
}

answer += choice.text
port.postMessage({ answer: answer, done: false, session: null })

if (data.choices[0]?.finish_reason) {
if (choice.finish_reason) {
finish()
return
}
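
Note: the new guard matters because OpenAI-compatible streams can emit chunks with an empty choices array — for example usage-only chunks when usage reporting is requested, or keep-alive chunks from intermediary proxies. Hedged illustration (chunk shape invented):

  // Previously, a chunk like this would throw on `data.choices[0].text`:
  // { "id": "cmpl-…", "object": "text_completion", "choices": [], "usage": { … } }
  // Now it is logged with console.debug and skipped without ending the stream.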
@@ -116,37 +153,82 @@ export async function generateAnswersWithChatgptApiCompat(
) {
const { controller, messageListener, disconnectListener } = setAbortController(port)
const model = getModelValue(session)
const isReasoningModel = isUsingReasoningModel(session)

const config = await getUserConfig()
const prompt = getConversationPairs(
session.conversationRecords.slice(-config.maxConversationContextLength),
false,
)
prompt.push({ role: 'user', content: question })

// Filter messages based on model type
// Reasoning models only support 'user' and 'assistant' roles during beta period
const filteredPrompt = isReasoningModel
? prompt.filter((msg) => {
const role = msg?.role
return role === 'user' || role === 'assistant'
})
: prompt

filteredPrompt.push({ role: 'user', content: question })
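
Note: effect of the filter above, with invented records — any non-user/assistant entries (e.g. a system prompt, if one appears in the stored pairs) are dropped before the new question is appended:

  // prompt = [
  //   { role: 'system', content: 'You are helpful.' }, // dropped for reasoning models
  //   { role: 'user', content: 'Hi' },                 // kept
  //   { role: 'assistant', content: 'Hello!' },        // kept
  // ]
  // filteredPrompt then receives { role: 'user', content: question } via push.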

let answer = ''
let finished = false
const finish = () => {
if (finished) return
finished = true
Comment on lines 176 to 179 — Copilot AI, Aug 30, 2025:

The finish function checks and sets the 'finished' flag without synchronization, which could lead to race conditions in concurrent scenarios. Consider using atomic operations or proper synchronization to ensure thread safety.

Suggested change:
-  let finished = false
-  const finish = () => {
-    if (finished) return
-    finished = true
+  // Use an atomic flag for thread safety
+  const finishedBuffer = new SharedArrayBuffer(4);
+  const finishedFlag = new Int32Array(finishedBuffer);
+  const finish = () => {
+    // Atomically check and set the finished flag
+    if (Atomics.compareExchange(finishedFlag, 0, 0, 1) !== 0) return;

pushRecord(session, question, answer)
console.debug('conversation history', { content: session.conversationRecords })
port.postMessage({ answer: null, done: true, session: session })
port.postMessage({ answer: null, done: true, session })
}

// Build request body with reasoning model-specific parameters
const requestBody = {
messages: filteredPrompt,
model,
...extraBody,
}

// Apply model-specific configurations
if (isReasoningModel) {
// Reasoning models use max_completion_tokens instead of max_tokens
requestBody.max_completion_tokens = config.maxResponseTokenLength
// Reasoning models don't support streaming during beta
requestBody.stream = false
// Reasoning models have fixed parameters during beta
requestBody.temperature = 1
requestBody.top_p = 1
requestBody.n = 1
requestBody.presence_penalty = 0
requestBody.frequency_penalty = 0
// Remove unsupported parameters for reasoning models
delete requestBody.tools
delete requestBody.tool_choice
delete requestBody.functions
delete requestBody.function_call
delete requestBody.max_tokens // Ensure max_tokens is not present
} else {
// Non-reasoning models use the existing behavior
requestBody.stream = true
requestBody.max_tokens = config.maxResponseTokenLength
requestBody.temperature = config.temperature
}
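
Note: with the branches above, a reasoning-model request body serializes roughly as follows (model name and token budget illustrative; maxResponseTokenLength comes from user config):

  // {
  //   messages: [...filteredPrompt],
  //   model: 'gpt-5-mini',
  //   max_completion_tokens: 1000,
  //   stream: false,
  //   temperature: 1, top_p: 1, n: 1,
  //   presence_penalty: 0, frequency_penalty: 0
  // }
  // A non-reasoning model instead gets stream: true, max_tokens, and the
  // user-configured temperature.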

// Validate API key with detailed error message
if (!apiKey || typeof apiKey !== 'string' || !apiKey.trim()) {
throw new Error(
'Invalid or empty API key provided. Please check your OpenAI API key configuration.',
)
}

await fetchSSE(`${baseUrl}/chat/completions`, {
method: 'POST',
signal: controller.signal,
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${apiKey}`,
Authorization: `Bearer ${apiKey.trim()}`,
},
body: JSON.stringify({
messages: prompt,
model,
stream: true,
max_tokens: config.maxResponseTokenLength,
temperature: config.temperature,
...extraBody,
}),
body: JSON.stringify(requestBody),
onMessage(message) {
console.debug('sse message', message)
if (finished) return
@@ -162,21 +244,61 @@
return
}

const delta = data.choices[0]?.delta?.content
const content = data.choices[0]?.message?.content
const text = data.choices[0]?.text
if (delta !== undefined) {
answer += delta
} else if (content) {
answer = content
} else if (text) {
answer += text
// Validate response structure early
const choice = data.choices?.[0]
if (!choice) {
console.debug('No choice in response data')
return
}
port.postMessage({ answer: answer, done: false, session: null })

if (data.choices[0]?.finish_reason) {
finish()
return
if (isReasoningModel) {
// For reasoning models (non-streaming), get the complete response
let content = choice.message?.content ?? choice.text

// Handle structured response arrays for reasoning models
if (Array.isArray(content)) {
content = extractContentFromArray(content)
}

// Ensure content is a string and not empty
if (content && typeof content === 'string') {
const trimmedContent = content.trim()
if (trimmedContent) {
answer = trimmedContent
port.postMessage({ answer, done: false, session: null })
}
} else if (content) {
// Handle unexpected content types gracefully
console.debug('Unexpected content type for reasoning model:', typeof content)
const stringContent = String(content).trim()
if (stringContent) {
answer = stringContent
port.postMessage({ answer, done: false, session: null })
}
}

// Only finish when we have a proper finish reason
if (choice.finish_reason) {
finish()
}
} else {
// For non-reasoning models (streaming), handle delta content
const delta = choice.delta?.content
const content = choice.message?.content
const text = choice.text
if (delta !== undefined) {
answer += delta
} else if (content) {
answer = content
} else if (text) {
answer += text
}
port.postMessage({ answer, done: false, session: null })

if (choice.finish_reason) {
finish()
return
}
}
},
async onStart() {},
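Note: because reasoning models are requested with stream: false, the onMessage branch above receives one complete payload instead of incremental deltas. For reference, an abridged non-streaming chat/completions response (standard OpenAI shape; values invented):

  // {
  //   "choices": [{
  //     "index": 0,
  //     "message": { "role": "assistant", "content": "…final answer…" },
  //     "finish_reason": "stop"
  //   }]
  // }
  // choice.message.content feeds the reasoning-model path; choice.finish_reason
  // triggers finish().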
29 changes: 29 additions & 0 deletions src/utils/model-name-convert.mjs
@@ -164,3 +164,32 @@ export function isInApiModeGroup(apiModeGroup, configOrSession) {
const [, { value: groupValue }] = foundGroup
return groupValue === apiModeGroup
}

export function isUsingReasoningModel(configOrSession) {
const modelValue = getModelValue(configOrSession)
if (!modelValue || typeof modelValue !== 'string') return false

// Normalize model value to handle potential whitespace
const normalizedModelValue = modelValue.trim().toLowerCase()

// Match o1, o3, or o4 models with optional standard OpenAI suffixes
// Uses word boundaries to prevent false positives like o10, o30, o40
// Allows: o1, o1-preview, o1-mini, o3, o3-mini, o4, o4-mini, etc.
// Prevents: o10, o30, o40, o1x, o3x, o4x, and other invalid patterns
if (
/^o[134](?:$|-(?:preview|mini|turbo|instruct|nano|small|medium|large))$/.test(
normalizedModelValue,
)
) {
return true
}

// Match gpt-5* pattern but exclude gpt-5-chat-* variants
// Allows: gpt-5, gpt-5-mini, gpt-5-nano, gpt-5-preview, gpt-5-turbo
// Prevents: gpt-5-chat-latest, gpt-5-chat, etc.
if (normalizedModelValue.startsWith('gpt-5') && !normalizedModelValue.startsWith('gpt-5-chat')) {
return true
}

return false
}
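
Note: expected classifications for the two checks above (names drawn from this PR's config plus counterexamples). One observation worth confirming: dated snapshot names fall outside the fixed suffix allow-list and return false.

  // true:  'o1', 'o1-mini', 'o1-preview', 'o3', 'o3-mini', 'o4-mini',
  //        'gpt-5', 'gpt-5-mini', 'gpt-5-nano', 'gpt-5-preview'
  // false: 'o10' (no word-boundary match), 'gpt-5-chat-latest' (chat variant),
  //        'gpt-4.1-mini', 'o4-mini-2025-04-16' (suffix not in the allow-list)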