Skip to content

Commit 95c753a

Browse files
committed
Merge branch 'pr/815'
2 parents 0bb8b7e + c6d084f commit 95c753a

File tree

7 files changed

+138
-1
lines changed

7 files changed

+138
-1
lines changed

README.md

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -90,7 +90,9 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
9090
- `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
9191
- `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**.
9292
- `ANTHROPIC`: Your Anthropic API key. **You only need to fill this if you wish to use Anthropic models**.
93-
- `Gemini`: Your Gemini API key. **You only need to fill this if you wish to use Google's models**.
93+
- `Gemini`: Your Gemini API key. **You only need to fill this if you wish to use Google's models**.
94+
- `DEEPSEEK`: Your Deepseek API key. **Only needed if you want Deepseek models.**
95+
- `AIMLAPI`: Your AI/ML API key. **Only needed if you want to use AI/ML API models and embeddings.**
9496

9597
**Note**: You can change these after starting Perplexica from the settings dialog.
9698

sample.config.toml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,9 @@ API_URL = "" # Ollama API URL - http://host.docker.internal:11434
2525
[MODELS.DEEPSEEK]
2626
API_KEY = ""
2727

28+
[MODELS.AIMLAPI]
29+
API_KEY = "" # Required to use AI/ML API chat and embedding models
30+
2831
[MODELS.LM_STUDIO]
2932
API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
3033

src/app/api/config/route.ts

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ import {
88
getOllamaApiEndpoint,
99
getOpenaiApiKey,
1010
getDeepseekApiKey,
11+
getAimlApiKey,
1112
getLMStudioApiEndpoint,
1213
updateConfig,
1314
} from '@/lib/config';
@@ -57,6 +58,7 @@ export const GET = async (req: Request) => {
5758
config['groqApiKey'] = getGroqApiKey();
5859
config['geminiApiKey'] = getGeminiApiKey();
5960
config['deepseekApiKey'] = getDeepseekApiKey();
61+
config['aimlApiKey'] = getAimlApiKey();
6062
config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
6163
config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
6264
config['customOpenaiModelName'] = getCustomOpenaiModelName();
@@ -95,6 +97,9 @@ export const POST = async (req: Request) => {
9597
DEEPSEEK: {
9698
API_KEY: config.deepseekApiKey,
9799
},
100+
AIMLAPI: {
101+
API_KEY: config.aimlApiKey,
102+
},
98103
LM_STUDIO: {
99104
API_URL: config.lmStudioApiUrl,
100105
},

src/app/settings/page.tsx

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@ interface SettingsType {
2323
ollamaApiUrl: string;
2424
lmStudioApiUrl: string;
2525
deepseekApiKey: string;
26+
aimlApiKey: string;
2627
customOpenaiApiKey: string;
2728
customOpenaiApiUrl: string;
2829
customOpenaiModelName: string;
@@ -862,6 +863,25 @@ const Page = () => {
862863
/>
863864
</div>
864865

866+
<div className="flex flex-col space-y-1">
867+
<p className="text-black/70 dark:text-white/70 text-sm">
868+
AI/ML API Key
869+
</p>
870+
<Input
871+
type="text"
872+
placeholder="AI/ML API Key"
873+
value={config.aimlApiKey}
874+
isSaving={savingStates['aimlApiKey']}
875+
onChange={(e) => {
876+
setConfig((prev) => ({
877+
...prev!,
878+
aimlApiKey: e.target.value,
879+
}));
880+
}}
881+
onSave={(value) => saveConfig('aimlApiKey', value)}
882+
/>
883+
</div>
884+
865885
<div className="flex flex-col space-y-1">
866886
<p className="text-black/70 dark:text-white/70 text-sm">
867887
LM Studio API URL

src/lib/config.ts

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,9 @@ interface Config {
3535
DEEPSEEK: {
3636
API_KEY: string;
3737
};
38+
AIMLAPI: {
39+
API_KEY: string;
40+
};
3841
LM_STUDIO: {
3942
API_URL: string;
4043
};
@@ -85,6 +88,8 @@ export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;
8588

8689
export const getDeepseekApiKey = () => loadConfig().MODELS.DEEPSEEK.API_KEY;
8790

91+
export const getAimlApiKey = () => loadConfig().MODELS.AIMLAPI.API_KEY;
92+
8893
export const getCustomOpenaiApiKey = () =>
8994
loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;
9095

src/lib/providers/aimlapi.ts

Lines changed: 94 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,94 @@
1+
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
2+
import { getAimlApiKey } from '../config';
3+
import { ChatModel, EmbeddingModel } from '.';
4+
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
5+
import { Embeddings } from '@langchain/core/embeddings';
6+
import axios from 'axios';
7+
8+
// Provider metadata consumed by src/lib/providers/index.ts, which registers
// this provider under the `aimlapi` key in PROVIDER_METADATA.
export const PROVIDER_INFO = {
  key: 'aimlapi',
  displayName: 'AI/ML API',
};

// Shape of one entry in the provider's `/models` listing.
// `type` distinguishes 'chat-completion' from 'embedding' models (see the
// loaders below). NOTE(review): optionality of `name`/`type` is assumed from
// usage here — confirm against the AI/ML API model-listing docs.
interface AimlApiModel {
  id: string;
  name?: string;
  type?: string;
}

// Base URL for all AI/ML API requests: the `/models` listing and the
// OpenAI-compatible chat/embedding endpoints passed as `baseURL` below.
const API_URL = 'https://api.aimlapi.com';
20+
21+
export const loadAimlApiChatModels = async () => {
22+
const apiKey = getAimlApiKey();
23+
24+
if (!apiKey) return {};
25+
26+
try {
27+
const response = await axios.get(`${API_URL}/models`, {
28+
headers: {
29+
'Content-Type': 'application/json',
30+
Authorization: `Bearer ${apiKey}`,
31+
},
32+
});
33+
34+
const chatModels: Record<string, ChatModel> = {};
35+
36+
response.data.data.forEach((model: AimlApiModel) => {
37+
if (model.type === 'chat-completion') {
38+
chatModels[model.id] = {
39+
displayName: model.name || model.id,
40+
model: new ChatOpenAI({
41+
openAIApiKey: apiKey,
42+
modelName: model.id,
43+
temperature: 0.7,
44+
configuration: {
45+
baseURL: API_URL,
46+
},
47+
}) as unknown as BaseChatModel,
48+
};
49+
}
50+
});
51+
52+
return chatModels;
53+
} catch (err) {
54+
console.error(`Error loading AI/ML API models: ${err}`);
55+
return {};
56+
}
57+
};
58+
59+
export const loadAimlApiEmbeddingModels = async () => {
60+
const apiKey = getAimlApiKey();
61+
62+
if (!apiKey) return {};
63+
64+
try {
65+
const response = await axios.get(`${API_URL}/models`, {
66+
headers: {
67+
'Content-Type': 'application/json',
68+
Authorization: `Bearer ${apiKey}`,
69+
},
70+
});
71+
72+
const embeddingModels: Record<string, EmbeddingModel> = {};
73+
74+
response.data.data.forEach((model: AimlApiModel) => {
75+
if (model.type === 'embedding') {
76+
embeddingModels[model.id] = {
77+
displayName: model.name || model.id,
78+
model: new OpenAIEmbeddings({
79+
openAIApiKey: apiKey,
80+
modelName: model.id,
81+
configuration: {
82+
baseURL: API_URL,
83+
},
84+
}) as unknown as Embeddings,
85+
};
86+
}
87+
});
88+
89+
return embeddingModels;
90+
} catch (err) {
91+
console.error(`Error loading AI/ML API embeddings models: ${err}`);
92+
return {};
93+
}
94+
};

src/lib/providers/index.ts

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,11 @@ import {
3535
loadDeepseekChatModels,
3636
PROVIDER_INFO as DeepseekInfo,
3737
} from './deepseek';
38+
import {
39+
loadAimlApiChatModels,
40+
loadAimlApiEmbeddingModels,
41+
PROVIDER_INFO as AimlApiInfo,
42+
} from './aimlapi';
3843
import {
3944
loadLMStudioChatModels,
4045
loadLMStudioEmbeddingsModels,
@@ -49,6 +54,7 @@ export const PROVIDER_METADATA = {
4954
gemini: GeminiInfo,
5055
transformers: TransformersInfo,
5156
deepseek: DeepseekInfo,
57+
aimlapi: AimlApiInfo,
5258
lmstudio: LMStudioInfo,
5359
custom_openai: {
5460
key: 'custom_openai',
@@ -76,6 +82,7 @@ export const chatModelProviders: Record<
7682
anthropic: loadAnthropicChatModels,
7783
gemini: loadGeminiChatModels,
7884
deepseek: loadDeepseekChatModels,
85+
aimlapi: loadAimlApiChatModels,
7986
lmstudio: loadLMStudioChatModels,
8087
};
8188

@@ -87,6 +94,7 @@ export const embeddingModelProviders: Record<
8794
ollama: loadOllamaEmbeddingModels,
8895
gemini: loadGeminiEmbeddingModels,
8996
transformers: loadTransformersEmbeddingsModels,
97+
aimlapi: loadAimlApiEmbeddingModels,
9098
lmstudio: loadLMStudioEmbeddingsModels,
9199
};
92100

0 commit comments

Comments
 (0)