diff --git a/integrations/llms/openai3.mdx b/integrations/llms/openai3.mdx
index c69ad0e4..656cb774 100644
--- a/integrations/llms/openai3.mdx
+++ b/integrations/llms/openai3.mdx
@@ -362,6 +362,8 @@ Portkey supports OpenAI Tool Calling and makes it interoperable across multiple
```javascript Get Weather Tool
+import Portkey from 'portkey-ai';
+
let tools = [{
type: "function",
function: {
@@ -378,6 +380,11 @@ let tools = [{
}
}];
+const portkey = new Portkey({
+ apiKey: 'PORTKEY_API_KEY',
+ virtualKey: 'OPENAI_VIRTUAL_KEY'
+});
+
let response = await portkey.chat.completions.create({
model: "gpt-4o",
messages: [
@@ -393,6 +400,8 @@ console.log(response.choices[0].finish_reason);
```python Get Weather Tool
+from portkey_ai import Portkey
+
tools = [{
"type": "function",
"function": {
@@ -409,6 +418,11 @@ tools = [{
}
}]
+portkey = Portkey(
+ api_key = "PORTKEY_API_KEY",
+    virtual_key = "OPENAI_VIRTUAL_KEY"
+ )
+
response = portkey.chat.completions.create(
model="gpt-4o",
messages=[
@@ -423,7 +437,7 @@ print(response.choices[0].finish_reason)
```
-```curl Get Weather Tool
+```sh Get Weather Tool
curl -X POST "https://api.portkey.ai/v1/chat/completions" \
-H "Content-Type: application/json" \
-H "Authorization: Bearer YOUR_PORTKEY_API_KEY" \
@@ -482,6 +496,13 @@ OpenAI's vision models can analyze images alongside text, enabling visual questi
```py Python
+from portkey_ai import Portkey
+
+portkey = Portkey(
+ api_key = "PORTKEY_API_KEY",
+ virtual_key = "OPENAI_VIRTUAL_KEY"
+ )
+
response = portkey.chat.completions.create(
model="gpt-4-vision-preview",
messages=[
@@ -503,6 +524,13 @@ print(response)
```
```ts Node.js
+import Portkey from 'portkey-ai';
+
+const portkey = new Portkey({
+ apiKey: 'PORTKEY_API_KEY',
+ virtualKey: 'OPENAI_VIRTUAL_KEY'
+});
+
const response = await portkey.chat.completions.create({
model: "gpt-4-vision-preview",
messages: [
@@ -572,6 +600,13 @@ Simply send text to the embeddings API endpoint to generate these vectors for yo
```python Python
+from portkey_ai import Portkey
+
+portkey = Portkey(
+ api_key = "PORTKEY_API_KEY",
+ virtual_key = "OPENAI_VIRTUAL_KEY"
+)
+
response = portkey.embeddings.create(
input="Your text string goes here",
model="text-embedding-3-small"
@@ -581,6 +616,13 @@ print(response.data[0].embedding)
```
```javascript Node.js
+import Portkey from 'portkey-ai';
+
+const portkey = new Portkey({
+ apiKey: 'PORTKEY_API_KEY',
+ virtualKey: 'OPENAI_VIRTUAL_KEY'
+});
+
const response = await portkey.embeddings.create({
input: "Your text string goes here",
model: "text-embedding-3-small"
@@ -589,7 +631,7 @@ const response = await portkey.embeddings.create({
console.log(response.data[0].embedding);
```
-```curl REST
+```sh REST
curl -X POST "https://api.portkey.ai/v1/embeddings" \
-H "Content-Type: application/json" \
-H "Authorization: Bearer YOUR_PORTKEY_API_KEY" \
@@ -697,8 +739,14 @@ OpenAI's Audio API converts speech to text using the Whisper model. It offers tr
```python Python
+from portkey_ai import Portkey
audio_file= open("/path/to/file.mp3", "rb")
+portkey = Portkey(
+ api_key = "PORTKEY_API_KEY",
+ virtual_key = "OPENAI_VIRTUAL_KEY"
+)
+
# Transcription
transcription = portkey.audio.transcriptions.create(
model="whisper-1",
@@ -716,6 +764,13 @@ print(translation.text)
```javascript Node.js
import fs from "fs";
+import Portkey from 'portkey-ai';
+
+
+const portkey = new Portkey({
+ apiKey: 'PORTKEY_API_KEY',
+ virtualKey: 'OPENAI_VIRTUAL_KEY'
+});
// Transcription
async function transcribe() {
@@ -738,7 +793,7 @@ async function translate() {
translate();
```
-```curl REST
+```sh REST
# Transcription
curl -X POST "https://api.portkey.ai/v1/audio/transcriptions" \
-H "Authorization: Bearer YOUR_PORTKEY_API_KEY" \
@@ -768,6 +823,12 @@ OpenAI's Text to Speech (TTS) API converts written text into natural-sounding au
```python Python
from pathlib import Path
+from portkey_ai import Portkey
+
+portkey = Portkey(
+ api_key = "PORTKEY_API_KEY",
+ virtual_key = "OPENAI_VIRTUAL_KEY"
+)
speech_file_path = Path(__file__).parent / "speech.mp3"
response = portkey.audio.speech.create(
@@ -786,6 +847,12 @@ import fs from 'fs';
const speechFile = path.resolve("./speech.mp3");
+const portkey = new Portkey({
+ apiKey: 'PORTKEY_API_KEY',
+ virtualKey: 'OPENAI_VIRTUAL_KEY'
+});
+
+
async function main() {
-const mp3 = await portkey.audio.speech.createCertainly! I'll continue with the Text to Speech section and then move on to the additional features and sections:
+const mp3 = await portkey.audio.speech.create({
@@ -802,7 +869,7 @@ async function main() {
main();
```
-```curl REST
+```sh REST
curl -X POST "https://api.portkey.ai/v1/audio/speech" \
-H "Authorization: Bearer YOUR_PORTKEY_API_KEY" \
-H "Content-Type: application/json" \
@@ -885,7 +952,7 @@ const chatCompletion = await portkey.chat.completions.create({
});
```
-```curl REST
+```sh REST
curl -X POST "https://api.portkey.ai/v1/chat/completions" \
-H "Content-Type: application/json" \
-H "Authorization: Bearer YOUR_PORTKEY_API_KEY" \
@@ -1042,30 +1109,235 @@ Portkey's AI gateway enables you to enforce input/output checks on requests by a
## Popular Libraries
-You can make your OpenAI integrations with popular libraries also production-ready and reliable with native integrations.
+You can make your OpenAI integrations with popular libraries also production-ready and reliable with native integrations. Portkey supports all the major libraries and frameworks. Here are some of the famous ones:
### OpenAI with Langchain
-
+
+
+
+
+ ```sh
+ pip install -U langchain-core portkey_ai langchain-openai
```
+
+ ```py
+ from langchain_openai import ChatOpenAI
+ from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL
+
+ llm = ChatOpenAI(api_key=PROVIDER_API_KEY,
+ base_url=PORTKEY_GATEWAY_URL,
+ default_headers=createHeaders(
+ api_key=PORTKEY_API_KEY,
+ provider="openai"))
+
+ llm.invoke("What is the meaning of life, universe and everything?")
```
-
+
+
+ ```sh
+ npm install langchain portkey-ai @langchain/openai
+ ```
+ ```js
+ import { ChatOpenAI } from "@langchain/openai";
+ import { createHeaders, PORTKEY_GATEWAY_URL} from "portkey-ai"
+
+ const PORTKEY_API_KEY = "..."
+ const PROVIDER_API_KEY = "..." // Add the API key of the AI provider being used
+
+ const portkeyConf = {
+ baseURL: PORTKEY_GATEWAY_URL,
+ defaultHeaders: createHeaders({apiKey: PORTKEY_API_KEY, provider: "openai"})
+ }
+
+ const chatModel = new ChatOpenAI({
+ apiKey: PROVIDER_API_KEY,
+ configuration: portkeyConf
+ });
+
+ await chatModel.invoke("What is the meaning of life, universe and everything?")
+ ```
+
+
+
+
+
### OpenAI with LangGraph
-
+
+
+ ```sh
+ pip install -U langgraph langchain_openai portkey-ai
```
+
+ ```py
+ from langchain_openai import ChatOpenAI
+ from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL
+
+ llm = ChatOpenAI(
+ api_key="dummy", # We'll pass a dummy API key here
+ base_url=PORTKEY_GATEWAY_URL,
+ default_headers=createHeaders(
+ api_key="PORTKEY_API_KEY",
+ virtual_key="YOUR_LLM_PROVIDER_VIRTUAL_KEY" # Pass your virtual key saved on Portkey for any provider you'd like (Anthropic, OpenAI, Groq, etc.)
+ )
+ )
```
-
+
+
+ ```sh
+ npm i @langchain/langgraph @langchain/openai portkey-ai
+ ```
+ ```js
+ import { ChatOpenAI } from "@langchain/openai";
+ import { createHeaders, PORTKEY_GATEWAY_URL } from "portkey-ai";
+
+ // Configure Portkey settings
+ const portkeyConf = {
+ baseURL: PORTKEY_GATEWAY_URL,
+ defaultHeaders: createHeaders({
+ apiKey: "PORTKEY_API_KEY",
+ virtualKey: "OPENAI_VIRTUAL_KEY"
+ })
+ };
+
+ // Initialize the LLM with Portkey configuration
+ const llm = new ChatOpenAI({
+ apiKey: "dummy",
+ configuration: portkeyConf
+ model: "gpt-4o" // or your preferred model
+ });
+ ```
+
+
+
### OpenAI with LibreChat
+**Step 1:** Create the `docker-compose.override.yml` file
+Create this file following the instructions in LibreChat's configuration documentation. This file will point to the `librechat.yaml` file where we will configure our Portkey settings (in Step 3).
+
+```yaml docker-compose.override.yml
+services:
+ api:
+ volumes:
+ - type: bind
+ source: ./librechat.yaml
+ target: /app/librechat.yaml
+```
+
+**Step 2:** Edit your existing .env file at the project root (if the file does not exist, copy the .env.example file and rename to .env).
+We will add the following environment variables:
+
+```env
+PORTKEY_API_KEY=YOUR_PORTKEY_API_KEY
+PORTKEY_GATEWAY_URL=https://api.portkey.ai/v1
+```
+
+**Step 3:** Edit the `librechat.yaml` file with this code
+
+ ```yaml LibreChat yaml
+ version: 1.1.4
+ cache: true
+ endpoints:
+ custom:
+ - name: "Portkey"
+ apiKey: "dummy"
+ baseURL: ${PORTKEY_GATEWAY_URL}
+ headers:
+ x-portkey-api-key: "${PORTKEY_API_KEY}"
+ x-portkey-virtual-key: "PORTKEY_OPENAI_VIRTUAL_KEY"
+ models:
+ default: ["gpt-4o-mini"]
+ fetch: true
+ titleConvo: true
+ titleModel: "current_model"
+ summarize: false
+ summaryModel: "current_model"
+ forcePrompt: false
+ modelDisplayLabel: "Portkey:OpenAI"
+ ```
+
### OpenAI with CrewAI
+    ```sh Install
+ pip install -qU crewai portkey-ai
+ ```
+    ```py Python SDK
+ from crewai import LLM
+ from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL
+
+ gpt_llm = LLM(
+ model="gpt-4",
+ base_url=PORTKEY_GATEWAY_URL,
+ api_key="dummy", # We are using Virtual key
+ extra_headers=createHeaders(
+ api_key="YOUR_PORTKEY_API_KEY",
+ virtual_key="YOUR_VIRTUAL_KEY", # Enter your OpenAI Virtual key from Portkey
+ config="YOUR_PORTKEY_CONFIG_ID", # All your model parameters and routing strategy
+ trace_id="llm1"
+ )
+ )
+ ```
+
### OpenAI with Llamaindex
+
+    ```sh Install
+ pip install llama-index-llms-openai portkey-ai
+ ```
+
+ ```python Python SDK
+ from llama_index.llms.openai import OpenAI
+ from llama_index.core.llms import ChatMessage
+ from portkey_ai import PORTKEY_GATEWAY_URL, createHeaders
+
+ portkey = OpenAI(
+ api_base=PORTKEY_GATEWAY_URL,
+        api_key="xx",  # Placeholder, no need to set
+ default_headers=createHeaders(
+ api_key="YOUR_PORTKEY_API_KEY",
+            config="YOUR_PORTKEY_CONFIG_ID"  # Your Portkey config ID for routing, caching, etc.
+ )
+ )
+ messages = [
+ ChatMessage(role="system", content="You are a pirate with a colorful personality"),
+ ChatMessage(role="user", content="What is your name"),
+ ]
+
+ resp = portkey.chat(messages)
+ print(resp)
+ ```
+
+
### OpenAI with Vercel
+    ```sh Install
+ npm install @portkey-ai/vercel-provider
+ ```
+    ```js Node JS
+    import { createPortkey } from '@portkey-ai/vercel-provider';
+    import { generateText } from 'ai';
+
+ const portkeyConfig = {
+ "provider": "openai", // Choose your provider (e.g., 'anthropic')
+ "api_key": "OPENAI_API_KEY",
+ "override_params": {
+ "model": "gpt-4o" // Select from 250+ models
+ }
+ };
+
+ const portkey = createPortkey({
+ apiKey: 'YOUR_PORTKEY_API_KEY',
+ config: portkeyConfig,
+ });
+ const { text } = await generateText({
+ model: portkey.chatModel(''), // Provide an empty string, we defined the model in the config
+ prompt: 'What is Portkey?',
+ });
+ ```
---
### More Libraries