Skip to content

Commit 7fef4fa

Browse files
Merge pull request #28 from tyllenb/main
Add OpenRouter for access to more AI Models
2 parents da320fb + 7247994 commit 7fef4fa

File tree

4 files changed

+89
-59
lines changed

4 files changed

+89
-59
lines changed

app/src/components/Chat.tsx

+39-41
Original file line numberDiff line numberDiff line change
@@ -7,10 +7,7 @@ import PromptForm from "./PromptForm";
77
import { toast } from "sonner";
88
import NewConversationButton from "./NewConversationButton";
99
import { NavMenu } from "./NavMenu";
10-
import {
11-
useQuery,
12-
useMutation,
13-
} from '@tanstack/react-query'
10+
import { useQuery, useMutation } from "@tanstack/react-query";
1411
import SideMenu from "./SideMenu";
1512

1613
export default function Chat({
@@ -25,44 +22,48 @@ export default function Chat({
2522
const [messages, setMessages] = useState<Message[]>([]);
2623
const [conversationId, setConversationId] = useState<number | null>(null);
2724
const [waitingForResponse, setWaitingForResponse] = useState(false);
28-
25+
2926
const sendMessageAndReceiveResponse = useMutation({
3027
mutationFn: async (userMessage: Message) => {
31-
const { data: sendMessageData, error: sendMessageError } = await supabaseClient
32-
.from('conversations')
33-
.update({ context: [...messages, userMessage] })
34-
.eq('id', conversationId);
35-
28+
const { data: sendMessageData, error: sendMessageError } =
29+
await supabaseClient
30+
.from("conversations")
31+
.update({ context: [...messages, userMessage] })
32+
.eq("id", conversationId);
33+
3634
if (sendMessageError) throw sendMessageError;
37-
35+
3836
setMessages([...messages, userMessage]);
3937
setWaitingForResponse(true);
4038

41-
const { data: aiResponseData, error: aiResponseError } = await supabaseClient.functions.invoke("chat", {
42-
body: { messageHistory: [...messages, userMessage] },
43-
});
39+
const { data: aiResponseData, error: aiResponseError } =
40+
await supabaseClient.functions.invoke("chat", {
41+
body: { messageHistory: [...messages, userMessage] },
42+
});
4443

4544
if (aiResponseError) throw aiResponseError;
46-
47-
const {data: updateConversationData, error: updateConversationError} = await supabaseClient
48-
.from('conversations')
49-
.update({ context: [...messages, userMessage, aiResponseData.msg] })
50-
.eq('id', conversationId);
51-
45+
46+
const { data: updateConversationData, error: updateConversationError } =
47+
await supabaseClient
48+
.from("conversations")
49+
.update({ context: [...messages, userMessage, aiResponseData.msg] })
50+
.eq("id", conversationId);
51+
5252
if (updateConversationError) throw updateConversationError;
53-
53+
5454
return aiResponseData;
5555
},
5656
onError: (error) => {
5757
toast.error(error.message || "Unknown error");
5858
setWaitingForResponse(false);
5959
},
6060
onSuccess: (aiResponse) => {
61-
setMessages(currentMessages => {
61+
setMessages((currentMessages) => {
6262
return [...currentMessages, aiResponse.msg as Message];
6363
});
64+
6465
setWaitingForResponse(false);
65-
}
66+
},
6667
});
6768

6869
const newConversation = useMutation({
@@ -94,16 +95,16 @@ export default function Chat({
9495
setConversationId(data[0].id);
9596
setWaitingForResponse(false);
9697
},
97-
})
98+
});
9899

99100
const getConversation = useQuery({
100-
queryKey: ['conversation', conversationId],
101+
queryKey: ["conversation", conversationId],
101102
queryFn: async () => {
102103
if (conversationId === null) {
103104
const { data, error } = await supabaseClient
104-
.from('conversations')
105-
.select('*')
106-
.order('created_at', { ascending: false })
105+
.from("conversations")
106+
.select("*")
107+
.order("created_at", { ascending: false })
107108
.limit(1);
108109
if (error) {
109110
throw error;
@@ -118,17 +119,17 @@ export default function Chat({
118119
} else {
119120
setMessages([]);
120121
const { data, error } = await supabaseClient
121-
.from('conversations')
122-
.select('*')
123-
.eq('id', conversationId)
122+
.from("conversations")
123+
.select("*")
124+
.eq("id", conversationId)
124125
.single();
125126
if (error) {
126127
throw error;
127128
}
128129
return data;
129130
}
130-
}
131-
})
131+
},
132+
});
132133

133134
useEffect(() => {
134135
if (getConversation.data) {
@@ -147,10 +148,10 @@ export default function Chat({
147148
<>
148149
<div className="h-24 bg-gradient-to-b from-background flex justify-between items-center fixed top-0 w-full"></div>
149150
<div className="fixed flex space-x-4 top-4 left-4">
150-
<SideMenu
151-
supabaseClient={supabaseClient}
152-
setConversationId={setConversationId}
153-
/>
151+
<SideMenu
152+
supabaseClient={supabaseClient}
153+
setConversationId={setConversationId}
154+
/>
154155
</div>
155156
<div className="fixed flex space-x-4 top-4 right-4">
156157
<NavMenu>
@@ -165,10 +166,7 @@ export default function Chat({
165166
</div>
166167

167168
<div className="p-8 mt-12 mb-32">
168-
<ChatLog
169-
messages={messages}
170-
waitingForResponse={waitingForResponse}
171-
/>
169+
<ChatLog messages={messages} waitingForResponse={waitingForResponse} />
172170
</div>
173171

174172
<div ref={bottomRef} />

docs/getting_started.md

+2-1
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,8 @@ We will use Supabase as our database (with vector search, pgvector), authenticat
6060
10. Now when we have the CLI, we need to login with our Supabase account, running `supabase login` - this should pop up a browser window, which should prompt you through the auth
6161
11. And link our Supabase CLI to a specific project, our newly created one, by running `supabase link --project-ref <your-project-id>` (you can check what the project id is from the Supabase web UI, or by running `supabase projects list`, and it will be under "reference id") - you can skip (enter) the database password, it's not needed.
6262
12. Now let's deploy our functions! ([see guide for more details](https://supabase.com/docs/guides/functions/deploy)) `supabase functions deploy --no-verify-jwt` (see [issue re:security](https://github.com/adamcohenhillel/AdDeus/issues/3))
63-
13. Lasly - if you're planning to first use OpenAI as your Foundation model provider, then you'd need to also run the following command, to make sure the functions have everything they need to run properly: `supabase secrets set OPENAI_API_KEY=<your-openai-api-key>` (Ollama setup guide is coming out soon)
63+
13. If you're planning to first use OpenAI as your Foundation model provider, then you'd need to also run the following command, to make sure the functions have everything they need to run properly: `supabase secrets set OPENAI_API_KEY=<your-openai-api-key>` (Ollama setup guide is coming out soon)
64+
14. If you want access to tons of AI Models, both Open & Closed Source, set up your OpenRouter API Key. Go to [OpenRouter](https://openrouter.ai/) to get your API Key, then run `supabase secrets set OPENROUTER_API_KEY=<your-openrouter-api-key>`.
6465

6566
If everything worked, we should now be able to start chatting with our personal AI via the app - so let's set that up!
6667

docs/images/openrouter.png

19.6 KB
Loading

supabase/functions/chat/index.ts

+48-17
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,25 @@ import { corsHeaders } from "../common/cors.ts";
55
import { supabaseClient } from "../common/supabaseClient.ts";
66
import { ApplicationError, UserError } from "../common/errors.ts";
77

8+
async function generateResponse(
9+
useOpenRouter,
10+
openaiClient,
11+
openRouterClient,
12+
messages
13+
) {
14+
const client = useOpenRouter ? openRouterClient : openaiClient;
15+
const modelName = useOpenRouter
16+
? "nousresearch/nous-capybara-34b"
17+
: "gpt-4-1106-preview";
18+
19+
const { choices } = await client.chat.completions.create({
20+
model: modelName,
21+
messages,
22+
});
23+
console.log("Completion: ", choices[0]);
24+
return choices[0].message;
25+
}
26+
827
const chat = async (req) => {
928
if (req.method === "OPTIONS") {
1029
return new Response("ok", { headers: corsHeaders });
@@ -21,29 +40,43 @@ const chat = async (req) => {
2140
throw new ApplicationError(
2241
"Unable to get auth user details in request data"
2342
);
24-
const { messageHistory } = await req.json();
43+
const requestBody = await req.json();
44+
const { messageHistory } = requestBody;
45+
2546
if (!messageHistory) throw new UserError("Missing query in request data");
2647

2748
const openaiClient = new OpenAI({
2849
apiKey: Deno.env.get("OPENAI_API_KEY"),
2950
});
3051

52+
const openRouterApiKey = Deno.env.get("OPENROUTER_API_KEY");
53+
const useOpenRouter = Boolean(openRouterApiKey); // Use OpenRouter if API key is available
54+
55+
let openRouterClient;
56+
if (useOpenRouter) {
57+
openRouterClient = new OpenAI({
58+
baseURL: "https://openrouter.ai/api/v1",
59+
apiKey: openRouterApiKey,
60+
});
61+
}
62+
3163
console.log("messageHistory: ", messageHistory);
3264

33-
// embed the last messageHistory message
65+
// Embed the last messageHistory message using OpenAI's embeddings API
3466
const embeddingsResponse = await openaiClient.embeddings.create({
3567
model: "text-embedding-ada-002",
3668
input: messageHistory[messageHistory.length - 1].content,
3769
});
3870
const embeddings = embeddingsResponse.data[0].embedding;
3971
console.log("Embeddings:", embeddings);
4072

73+
// Retrieve records from Supabase based on embeddings similarity
4174
const { data: relevantRecords, error: recordsError } = await supabase.rpc(
4275
"match_records_embeddings_similarity",
4376
{
44-
query_embedding: JSON.stringify(embeddings), // Pass the embedding you want to compare
45-
match_threshold: 0.8, // Choose an appropriate threshold for your data
46-
match_count: 10, // Choose the number of matches
77+
query_embedding: JSON.stringify(embeddings),
78+
match_threshold: 0.8,
79+
match_count: 10,
4780
}
4881
);
4982

@@ -67,27 +100,25 @@ const chat = async (req) => {
67100
console.log("messages: ", messages);
68101

69102
try {
70-
let completion = await openaiClient.chat.completions.create({
71-
model: "gpt-4-1106-preview",
72-
messages: messages,
73-
});
74-
console.log("completion: ", completion);
75-
console.log(
76-
"completion.choices[0].content: ",
77-
completion.choices[0].content
103+
const responseMessage = await generateResponse(
104+
useOpenRouter,
105+
openaiClient,
106+
openRouterClient,
107+
messages
78108
);
109+
79110
return new Response(
80111
JSON.stringify({
81-
msg: completion.choices[0].message,
112+
msg: responseMessage,
82113
}),
83114
{
84115
headers: { ...corsHeaders, "Content-Type": "application/json" },
85116
status: 200,
86117
}
87118
);
88-
} catch (openAiError) {
89-
console.log("!!! Error in OpenAI fallback: ", openAiError);
90-
throw openAiError;
119+
} catch (error) {
120+
console.log("Error: ", error);
121+
throw new ApplicationError("Error processing chat completion");
91122
}
92123

93124
return new Response(

0 commit comments

Comments
 (0)