159 changes: 159 additions & 0 deletions backend/example-fake-stream-usage.js
@@ -0,0 +1,159 @@
#!/usr/bin/env node

/**
 * Example usage of the fake stream method
 * This shows different ways to use the fake streaming for debugging
 */

import aiService from './services/aiService.js';

async function example1_BasicUsage() {
  console.log('📝 Example 1: Basic Fake Stream Usage\n');

  const stream = await aiService.streamGeminiFake(
    "What is the capital of France?",
    [],
    "gemini-1.5-flash",
    "example-session-1"
  );

  console.log('Streaming response:');
  for await (const chunk of stream) {
    process.stdout.write(chunk.text);
  }
  console.log('\n');
}

async function example2_WithConversationHistory() {
  console.log('📝 Example 2: With Conversation History\n');

  const conversationHistory = [
    { role: 'user', content: 'Hello, I need help with JavaScript.' },
    { role: 'assistant', content: 'I\'d be happy to help you with JavaScript! What specific topic would you like to know about?' }
  ];

  const stream = await aiService.streamGeminiFake(
    "Can you explain closures?",
    conversationHistory,
    "gemini-1.5-flash",
    "example-session-2"
  );

  console.log('Streaming response with context:');
  for await (const chunk of stream) {
    process.stdout.write(chunk.text);
  }
  console.log('\n');
}

async function example3_WithMCPTools() {
  console.log('📝 Example 3: With MCP Tools Enabled\n');

  const stream = await aiService.streamGeminiFake(
    "Can you help me with a task?",
    [],
    "gemini-1.5-flash",
    "example-session-3",
    [],
    true // Enable MCP tools simulation
  );

  console.log('Streaming response with potential tool calls:');
  for await (const chunk of stream) {
    if (chunk.toolCall) {
      console.log(`\n🔧 Tool Call: ${chunk.toolCall.name}`);
      console.log(`   Parameters:`, chunk.toolCall.parameters);
    } else {
      process.stdout.write(chunk.text);
    }
  }
  console.log('\n');
}

async function example4_ErrorHandling() {
  console.log('📝 Example 4: Error Handling\n');

  try {
    const stream = await aiService.streamGeminiFake(
      "This is a test message",
      [],
      "gemini-1.5-flash",
      "example-session-4"
    );

    console.log('Streaming response:');
    for await (const chunk of stream) {
      process.stdout.write(chunk.text);
    }
    console.log('\n');
  } catch (error) {
    console.error('Error occurred:', error.message);
  }
}

async function example5_ChunkAnalysis() {
  console.log('📝 Example 5: Detailed Chunk Analysis\n');

  const stream = await aiService.streamGeminiFake(
    "Analyze this streaming response in detail.",
    [],
    "gemini-1.5-flash",
    "example-session-5"
  );

  let chunkCount = 0;
  let totalLength = 0;
  const chunks = [];

  console.log('Detailed chunk analysis:');
  console.log('─'.repeat(60));

  for await (const chunk of stream) {
    chunkCount++;
    totalLength += chunk.text.length;
    chunks.push({
      index: chunk.chunkIndex,
      text: chunk.text.trim(),
      length: chunk.text.length,
      timestamp: chunk.timestamp
    });

    console.log(`Chunk ${chunkCount}: "${chunk.text.trim()}" (${chunk.text.length} chars)`);
  }

  console.log('─'.repeat(60));
  console.log(`Total chunks: ${chunkCount}`);
  console.log(`Total length: ${totalLength} characters`);
  console.log(`Average chunk size: ${Math.round(totalLength / chunkCount)} characters`);
Contributor (medium):

There's a potential division by zero error here if chunkCount is 0, which would happen if the stream is empty. It's good practice to add a guard to prevent this.

Suggested change:
-  console.log(`Average chunk size: ${Math.round(totalLength / chunkCount)} characters`);
+  console.log(`Average chunk size: ${chunkCount > 0 ? Math.round(totalLength / chunkCount) : 0} characters`);

  console.log(`First chunk: "${chunks[0]?.text}"`);
  console.log(`Last chunk: "${chunks[chunks.length - 1]?.text}"`);
  console.log();
}

async function runAllExamples() {
  console.log('🚀 Running Fake Stream Examples\n');
  console.log('='.repeat(60));

  await example1_BasicUsage();
  console.log('='.repeat(60));

  await example2_WithConversationHistory();
  console.log('='.repeat(60));

  await example3_WithMCPTools();
  console.log('='.repeat(60));

  await example4_ErrorHandling();
  console.log('='.repeat(60));

  await example5_ChunkAnalysis();
  console.log('='.repeat(60));

  console.log('✅ All examples completed!');
}

// Run all examples
runAllExamples().catch(error => {
  console.error('💥 Examples failed:', error);
  process.exit(1);
});
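
Since the file starts with a shebang and uses ES module imports, it can be run directly from the repo root (assuming Node 18+ and that backend/package.json declares "type": "module" — neither is shown in this diff):

node backend/example-fake-stream-usage.js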
12 changes: 9 additions & 3 deletions backend/routes/chat.js
@@ -11,7 +11,7 @@ async function sleep(ms) {
 * Stream chat response from AI provider
 */
router.post('/stream', async (req, res) => {
-  const { message, model = 'gpt-4.1-nano', conversationHistory = [], sessionId = 'default', files = [] } = req.body;
+  const { message, model = 'gpt-4.1-nano', conversationHistory = [], sessionId = 'default', files = [], useFakeStream = false } = req.body;

  if (!message || !message.trim()) {
    return res.status(400).json({ error: 'Message is required' });
@@ -33,8 +33,14 @@ router.post('/stream', async (req, res) => {
      stream = await aiService.streamOpenAI(message, conversationHistory, model);
    } else if (model.startsWith('gemini-')) {
      provider = 'gemini';
-     // stream = await aiService.streamGeminiWithTools(message, conversationHistory, model, sessionId, files);
-     stream = await aiService.streamGemini(message, conversationHistory, model, sessionId, files, false);
+     // Use fake stream for debugging if requested
+     if (useFakeStream) {
+       console.log('🔧 Using FAKE Gemini stream for debugging');
+       stream = await aiService.streamGeminiFake(message, conversationHistory, model, sessionId, files, false);
+     } else {
+       // stream = await aiService.streamGeminiWithTools(message, conversationHistory, model, sessionId, files);
+       stream = await aiService.streamGemini(message, conversationHistory, model, sessionId, files, false);
+     }
    } else {
      throw new Error(`Unsupported model: ${model}`);
    }
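
For a quick smoke test of the new flag, a client request might look like the following sketch. The /api/chat/stream mount path, the port, and the plain-text framing of the streamed response are assumptions not confirmed by this diff; it also assumes Node 18+ in an ES module for top-level await and global fetch:

// Hypothetical client-side smoke test for useFakeStream; URL and framing are assumed.
const res = await fetch('http://localhost:3000/api/chat/stream', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    message: 'Test the fake stream',
    model: 'gemini-1.5-flash',
    useFakeStream: true // routes to streamGeminiFake instead of the real Gemini API
  })
});

// Consume the streamed body incrementally as chunks arrive.
const reader = res.body.getReader();
const decoder = new TextDecoder();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  process.stdout.write(decoder.decode(value));
}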
73 changes: 73 additions & 0 deletions backend/services/aiService.js
@@ -375,6 +375,79 @@ class AIService {
    }
  }

  /**
   * Fake stream Gemini method for debugging streaming responses
   * Simulates chunk-by-chunk streaming with configurable delays and content
   */
  async streamGeminiFake(message, conversationHistory = [], model = config.gemini.model, sessionId = 'default', files = [], useMCPTools = true) {
    console.log('🔧 Using FAKE Gemini stream for debugging');
    console.log('Message:', message);
    console.log('Model:', model);
    console.log('Session ID:', sessionId);
    console.log('Files:', files);
    console.log('Use MCP Tools:', useMCPTools);
Comment on lines +383 to +388
Contributor (medium):

These console.log statements are useful for debugging but can create a lot of noise if this code is ever used in a staging or production environment. It's a good practice to wrap them in a condition that checks for a development environment, like if (process.env.NODE_ENV === 'development') { ... }.
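
A minimal sketch of the suggested guard (assuming NODE_ENV is set appropriately in each environment):

    // Only emit the debug banner and parameters in development.
    if (process.env.NODE_ENV === 'development') {
      console.log('🔧 Using FAKE Gemini stream for debugging');
      console.log('Message:', message);
      console.log('Use MCP Tools:', useMCPTools);
    }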


    // Simulate a realistic AI response that gets streamed
    const fakeResponse = `This is a fake streaming response for debugging purposes.

Your message was: "${message}"

I'm simulating how a real Gemini response would be streamed chunk by chunk. Each chunk represents a small piece of the complete response that would normally be generated by the AI model.

Key features of this fake stream:
- Configurable delay between chunks
- Realistic content structure
- Error simulation capabilities
- Tool call simulation (if enabled)

This helps you debug streaming issues without making actual API calls to Gemini.`;

    // Split the response into chunks for streaming
    const words = fakeResponse.split(' ');
    const chunks = [];

    // Create chunks of three words each (the last chunk may be shorter)
    for (let i = 0; i < words.length; i += 3) {
      const chunkWords = words.slice(i, i + 3);
      chunks.push(chunkWords.join(' ') + ' ');
    }

    // Create an async generator that yields chunks with delays
    const streamGenerator = async function* () {
      for (let i = 0; i < chunks.length; i++) {
        // Simulate network delay (50-150ms per chunk)
        const delay = Math.random() * 100 + 50;
        await new Promise(resolve => setTimeout(resolve, delay));

        // Yield chunk in the same format as real Gemini
        yield {
          text: chunks[i],
          chunkIndex: i,
          totalChunks: chunks.length,
          timestamp: new Date().toISOString()
        };

        console.log(`📦 Chunk ${i + 1}/${chunks.length}: "${chunks[i].trim()}"`);
Contributor (medium):

This log inside the generator loop can be very verbose. Consider making it conditional, for example, by wrapping it in an if (process.env.NODE_ENV === 'development') block, to avoid cluttering logs in other environments.
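
One possible shape for that guard, sketched with a hypothetical debugLog helper (not part of this diff) defined once before the loop:

        // No-op outside development so the per-chunk log disappears in staging/production.
        const debugLog = process.env.NODE_ENV === 'development' ? console.log.bind(console) : () => {};

        // ...then inside the generator loop:
        debugLog(`📦 Chunk ${i + 1}/${chunks.length}: "${chunks[i].trim()}"`);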

      }

      // Optional: Simulate a tool call at the end
      if (useMCPTools && Math.random() > 0.7) {
        console.log('🔧 Simulating tool call...');
        await new Promise(resolve => setTimeout(resolve, 200));
        yield {
          text: '',
          toolCall: {
            name: 'fake_tool',
            parameters: { message: 'This is a simulated tool call' }
          },
          timestamp: new Date().toISOString()
        };
      }
    };

    return streamGenerator();
  }

  /**
   * Stream response from Gemini without chat session reuse
   */