Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions components/Assistant.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@

import React, { useState, useRef, useEffect } from 'react';
import { ChatMessage } from '../types';
import { GeminiAdapter } from '../services/aiAdapter';
import { aiService } from '../services/aiAdapter';

const Assistant: React.FC = () => {
const [isOpen, setIsOpen] = useState(false);
Expand Down Expand Up @@ -34,7 +34,7 @@ const Assistant: React.FC = () => {
// Check if user wants to generate an image
if (inputValue.toLowerCase().includes('gerar imagem de')) {
const prompt = inputValue.replace(/gerar imagem de/i, '').trim();
const imageUrl = await GeminiAdapter.generateImage(prompt);
const imageUrl = await aiService.generateImage(prompt);
if (imageUrl) {
const aiMsg: ChatMessage = { role: 'model', text: `Aqui está a imagem de: ${prompt}`, imageUrl, timestamp: Date.now() };
setMessages(prev => [...prev, aiMsg]);
Expand All @@ -45,7 +45,7 @@ const Assistant: React.FC = () => {
} else {
const history = messages.map(m => ({ role: m.role, text: m.text }));
// Use fast mode for chat
const responseText = await GeminiAdapter.sendMessage(history, userMsg.text, 'fast');
const responseText = await aiService.sendMessage(history, userMsg.text, 'fast');

const aiMsg: ChatMessage = { role: 'model', text: responseText, timestamp: Date.now() };
setMessages(prev => [...prev, aiMsg]);
Expand Down
4 changes: 2 additions & 2 deletions components/ImageGenerator.tsx
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import React, { useState } from 'react';
import { GeminiAdapter } from '../services/aiAdapter';
import { aiService } from '../services/aiAdapter';

interface ImageGeneratorProps {
onImageGenerated: (imageUrl: string) => void;
Expand All @@ -13,7 +13,7 @@ const ImageGenerator: React.FC<ImageGeneratorProps> = ({ onImageGenerated }) =>
const generateImage = async () => {
setLoading(true);
try {
const imageUrl = await GeminiAdapter.generateImage(prompt);
const imageUrl = await aiService.generateImage(prompt);
if (imageUrl) {
onImageGenerated(imageUrl);
}
Expand Down
11 changes: 11 additions & 0 deletions services/aiAdapter.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
import { aiFactory } from './aiFactory';
import { cache } from './cache';
import { PRODUCTS } from '../constants';
import { OllamaAdapter } from './ollamaAdapter';

export interface AIAdapter {
sendMessage(history: {role: string, text: string}[], newMessage: string, mode: 'fast' | 'complex'): Promise<string>;
Expand Down Expand Up @@ -98,3 +99,13 @@ export const GeminiAdapter: AIAdapter = {
}
}
};

/**
* Unified AI service that delegates to the configured provider.
* Set the AI_PROVIDER environment variable to "ollama" to use Ollama,
* otherwise defaults to Gemini.
*/
// Provider id comes from the build-time AI_PROVIDER env var (injected via Vite
// `define`); defaults to 'gemini' when unset or empty.
function getProvider(): string {
  return process.env.AI_PROVIDER || 'gemini';
}

// Evaluated once at module load: the provider cannot be switched at runtime,
// only by rebuilding with a different AI_PROVIDER value.
export const aiService: AIAdapter =
getProvider() === 'ollama' ? OllamaAdapter : GeminiAdapter;
95 changes: 95 additions & 0 deletions services/ollamaAdapter.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
/**
* @license
* SPDX-License-Identifier: Apache-2.0
*/

import { cache } from './cache';
import { PRODUCTS } from '../constants';
import type { AIAdapter } from './aiAdapter';

// Ollama connection settings. The exact `process.env.X` member expressions are
// statically replaced at build time by Vite `define` (see vite.config.ts), so
// do not refactor them into dynamic/bracket lookups. `||` (not `??`) is used
// deliberately: an empty-string env var also falls back to the default.
const OLLAMA_BASE_URL = process.env.OLLAMA_BASE_URL || 'http://localhost:11434';
const OLLAMA_MODEL = process.env.OLLAMA_MODEL || 'llama3';

/**
 * System prompt for the Ollama chat endpoint: the brand voice plus the current
 * product catalog, rendered once at module load from PRODUCTS.
 */
const SYSTEM_INSTRUCTION: string = `You are the AI Concierge for "Achadinhos Maternidade", a warm, organic lifestyle tech brand.
Your tone is calm, inviting, grounded, and sophisticated. Avoid overly "techy" jargon; prefer words like "natural", "seamless", "warm", and "texture".

Here is our current product catalog:
${PRODUCTS.map(
  (product) =>
    `- ${product.name} ($${product.price}): ${product.description}. Features: ${product.features.join(', ')}`
).join('\n')}

Answer customer questions about specifications, recommendations, and brand philosophy.
Keep answers concise (under 3 sentences usually) to fit the chat UI.
If asked about products not in the list, gently steer them back to Achadinhos Maternidade products.`;

/**
 * Build a deterministic cache key for a conversation state.
 * Folds every prior turn plus the pending message through an unsigned
 * djb2-xor hash; the history length is embedded in the key for extra
 * dispersion.
 */
const hashChatKey = (history: {role: string, text: string}[], newMessage: string): string => {
  const combined = `${history.map((turn) => `${turn.role}:${turn.text}`).join('|')}|${newMessage}`;
  let acc = 5381;
  for (let i = 0; i < combined.length; i += 1) {
    // ((acc << 5) + acc) === acc * 33; `>>> 0` keeps the value an unsigned 32-bit int.
    acc = (((acc << 5) + acc) ^ combined.charCodeAt(i)) >>> 0;
  }
  return `ollama_chat_${history.length}_${acc}`;
};

/**
 * Translate an internal chat role into the name Ollama expects
 * ("system" | "user" | "assistant"): "model" becomes "assistant",
 * anything else passes through unchanged.
 */
const mapRole = (role: string): string => (role === 'model' ? 'assistant' : role);

export const OllamaAdapter: AIAdapter = {
  /**
   * Send a chat turn to Ollama's /api/chat endpoint (non-streaming) and
   * return the model's reply. Responses are memoized per conversation state;
   * any failure yields a user-facing Portuguese fallback message.
   * The `mode` hint is accepted for interface parity but unused here.
   */
  async sendMessage(history, newMessage, _mode) {
    const key = hashChatKey(history, newMessage);

    // Serve repeated questions straight from the cache.
    const hit = cache.get(key);
    if (hit) {
      return hit;
    }

    try {
      // System instruction first, then the prior turns, then the new message.
      const payload = {
        model: OLLAMA_MODEL,
        messages: [
          { role: 'system', content: SYSTEM_INSTRUCTION },
          ...history.map((turn) => ({ role: mapRole(turn.role), content: turn.text })),
          { role: 'user', content: newMessage },
        ],
        stream: false,
      };

      const res = await fetch(`${OLLAMA_BASE_URL}/api/chat`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(payload),
      });

      if (!res.ok) {
        // Caught below so the user still gets the friendly fallback text.
        throw new Error(`Ollama API returned ${res.status}: ${res.statusText}`);
      }

      const data = await res.json();
      const text = data.message?.content || '';

      cache.set(key, text);
      return text;
    } catch (error) {
      console.error('Ollama API Error:', error);
      return 'Desculpe, estou com dificuldades para me conectar ao serviço de IA no momento.';
    }
  },

  /**
   * Ollama has no built-in image generation endpoint, so this provider
   * always reports unsupported by resolving to null.
   */
  async generateImage(_prompt) {
    console.warn('Image generation is not supported by the Ollama provider.');
    return null;
  },
};
5 changes: 4 additions & 1 deletion vite.config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,10 @@ export default defineConfig(({ mode }) => {
plugins: [react()],
define: {
'process.env.API_KEY': JSON.stringify(env.GEMINI_API_KEY),
'process.env.GEMINI_API_KEY': JSON.stringify(env.GEMINI_API_KEY)
'process.env.GEMINI_API_KEY': JSON.stringify(env.GEMINI_API_KEY),
'process.env.AI_PROVIDER': JSON.stringify(env.AI_PROVIDER || 'gemini'),
'process.env.OLLAMA_BASE_URL': JSON.stringify(env.OLLAMA_BASE_URL || 'http://localhost:11434'),
'process.env.OLLAMA_MODEL': JSON.stringify(env.OLLAMA_MODEL || 'llama3'),
},
resolve: {
alias: {
Expand Down