Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,75 @@
/* eslint-disable no-console */
/**
* Basic usage example for OpenAI Agents SDK instrumentation
*
* This example demonstrates how to:
* 1. Set up OpenInference instrumentation for OpenAI Agents
* 2. Create a simple agent with a tool
* 3. Run the agent and observe the generated spans
*
* Prerequisites:
* - Set OPENAI_API_KEY environment variable
* - Install dependencies: @openai/agents, @opentelemetry/sdk-trace-node
*/

import { instrumentation, provider } from "./instrumentation";

// IMPORTANT: Import the SDK as a namespace so we can pass it to instrument()
import * as agentsSdk from "@openai/agents";
import { z } from "zod";

// A minimal weather tool the agent can call. The zod schema below defines
// the tool's input contract, which the SDK surfaces to the model.
const getWeather = agentsSdk.tool({
  name: "get_weather",
  description: "Get the current weather for a location",
  parameters: z.object({
    location: z.string().describe("The city and state, e.g. San Francisco, CA"),
  }),
  // Returns canned data so the example runs without a real weather API.
  execute: async ({ location }) => ({
    location,
    temperature: 72,
    unit: "F",
    conditions: "sunny",
  }),
});

// Agent wired up with the weather tool declared above; its instructions
// steer the model toward calling get_weather for weather questions.
const weatherAgent = new agentsSdk.Agent({
  tools: [getWeather],
  name: "WeatherAgent",
  instructions:
    "You are a helpful weather assistant. Use the get_weather tool to answer questions about the weather.",
});

/**
 * Entry point: instruments the SDK, runs the weather agent once, and then
 * flushes and shuts down the tracer provider.
 *
 * Fix: flush/shutdown now live in a `finally` block so spans recorded before
 * an unexpected failure (e.g. in instrument() or console I/O) are still
 * exported and the provider is always shut down cleanly.
 */
async function main() {
  // Instrument using the SDK module from our static import
  // This ensures the processor is registered with the correct module instance
  instrumentation.instrument(agentsSdk);

  console.log("Running weather agent...\n");

  try {
    const result = await agentsSdk.run(
      weatherAgent,
      "What's the weather like in San Francisco?",
    );

    console.log("\nAgent response:", result.finalOutput);
  } catch (error) {
    console.error("Error running agent:", error);
  } finally {
    // Force flush spans to ensure they are exported
    await provider.forceFlush();

    // Give time for spans to be exported
    await new Promise((resolve) => setTimeout(resolve, 2000));

    // Shutdown provider
    await provider.shutdown();
  }
}

main().catch(console.error);
Original file line number Diff line number Diff line change
@@ -0,0 +1,151 @@
/* eslint-disable no-console */
/**
* Guardrails example for OpenAI Agents SDK instrumentation
*
* This example demonstrates how to:
* 1. Set up OpenInference instrumentation for OpenAI Agents
* 2. Create input guardrails to validate user input
* 3. Create output guardrails to validate agent responses
* 4. Track guardrail executions in traces
*
* Prerequisites:
* - Set OPENAI_API_KEY environment variable
* - Install dependencies: @openai/agents, @opentelemetry/sdk-trace-node
*/

import { instrumentation, provider } from "./instrumentation";

import type { InputGuardrail } from "@openai/agents";
// IMPORTANT: Import the SDK as a namespace so we can pass it to instrument()
import * as agentsSdk from "@openai/agents";

// Input guardrail that trips when the input contains a blocked keyword.
const contentFilter: InputGuardrail = {
  name: "ContentFilter",
  execute: async ({ input }) => {
    // Simplified denylist; a production filter would use a moderation API.
    const blockedWords = ["spam", "hack", "illegal"];
    const inputStr = typeof input === "string" ? input : JSON.stringify(input);
    const lowerInput = inputStr.toLowerCase();

    // First blocked word present as a substring, if any (array order wins).
    const hit = blockedWords.find((word) => lowerInput.includes(word));

    if (hit !== undefined) {
      return {
        tripwireTriggered: true,
        outputInfo: {
          reason: `Input contains blocked word: ${hit}`,
          blocked: true,
        },
      };
    }

    return {
      tripwireTriggered: false,
      outputInfo: {
        reason: "Input passed content filter",
        blocked: false,
      },
    };
  },
};

// Input guardrail that trips when the input exceeds a length limit.
const lengthGuardrail: InputGuardrail = {
  name: "InputLengthGuardrail",
  execute: async ({ input }) => {
    const maxLength = 500;
    const inputStr = typeof input === "string" ? input : JSON.stringify(input);
    const tooLong = inputStr.length > maxLength;

    return tooLong
      ? {
          tripwireTriggered: true,
          outputInfo: {
            reason: `Input exceeds maximum length of ${maxLength} characters`,
            blocked: true,
          },
        }
      : {
          tripwireTriggered: false,
          outputInfo: {
            reason: "Input length is acceptable",
            blocked: false,
          },
        };
  },
};

// Agent protected by both input guardrails declared above; each guardrail
// runs against the user input before the model is invoked.
const assistantAgent = new agentsSdk.Agent({
  inputGuardrails: [contentFilter, lengthGuardrail],
  name: "SafeAssistant",
  instructions:
    "You are a helpful assistant. Answer user questions concisely and helpfully.",
});

/**
 * Entry point: instruments the SDK, exercises the guardrails with three
 * inputs (pass / blocked word / pass), then flushes and shuts down the
 * tracer provider.
 *
 * Fix: flush/shutdown now live in a `finally` block so spans recorded before
 * an unexpected failure are still exported and the provider is always shut
 * down cleanly.
 */
async function main() {
  // Instrument using the SDK module from our static import
  instrumentation.instrument(agentsSdk);

  console.log("Running guardrails example...\n");

  try {
    // Test 1: Normal input (should pass)
    console.log("Test 1: Normal input");
    console.log('Input: "What is the capital of France?"\n');
    try {
      const result1 = await agentsSdk.run(
        assistantAgent,
        "What is the capital of France?",
      );
      console.log("Response:", result1.finalOutput);
    } catch (error) {
      console.error("Guardrail triggered:", error);
    }
    console.log("\n---\n");

    // Test 2: Input with blocked word (should trigger guardrail)
    console.log("Test 2: Input with blocked word");
    console.log('Input: "How do I hack into a computer?"\n');
    try {
      const result2 = await agentsSdk.run(
        assistantAgent,
        "How do I hack into a computer?",
      );
      console.log("Response:", result2.finalOutput);
    } catch (error) {
      // The SDK signals a tripped input guardrail with a dedicated error type.
      if (error instanceof agentsSdk.InputGuardrailTripwireTriggered) {
        console.log(
          "Guardrail triggered! The content filter blocked this request.",
        );
      } else if (error instanceof Error) {
        console.log("Error:", error.message);
      }
    }
    console.log("\n---\n");

    // Test 3: Another normal input (should pass)
    console.log("Test 3: Another normal input");
    console.log('Input: "Explain photosynthesis briefly"\n');
    try {
      const result3 = await agentsSdk.run(
        assistantAgent,
        "Explain photosynthesis briefly",
      );
      console.log("Response:", result3.finalOutput);
    } catch (error) {
      console.error("Error:", error);
    }
  } finally {
    // Force flush spans to ensure they are exported
    await provider.forceFlush();

    // Give time for spans to be exported
    await new Promise((resolve) => setTimeout(resolve, 2000));

    // Shutdown provider
    await provider.shutdown();
  }
}

main().catch(console.error);
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
/* eslint-disable no-console */
/**
* Handoffs example for OpenAI Agents SDK instrumentation
*
* This example demonstrates how to:
* 1. Set up OpenInference instrumentation for OpenAI Agents
* 2. Create multiple specialized agents
* 3. Use handoffs to transfer control between agents
* 4. Track agent handoffs in traces (graph.node.id, graph.node.parent_id)
*
* Prerequisites:
* - Set OPENAI_API_KEY environment variable
* - Install dependencies: @openai/agents, @opentelemetry/sdk-trace-node
*/

import { instrumentation, provider } from "./instrumentation";

// IMPORTANT: Import the SDK as a namespace so we can pass it to instrument()
import * as agentsSdk from "@openai/agents";

// Specialized translator agents. Each handles a single target language and
// is only reached via a handoff from the triage agent below.
const spanishAgent = new agentsSdk.Agent({
  instructions:
    "You are a Spanish translator. Translate the user's message to Spanish. Only output the translation, nothing else.",
  name: "SpanishTranslator",
});

const frenchAgent = new agentsSdk.Agent({
  instructions:
    "You are a French translator. Translate the user's message to French. Only output the translation, nothing else.",
  name: "FrenchTranslator",
});

// Triage agent: inspects each request and either answers directly or hands
// off to one of the translator agents above.
// Agent.create (rather than `new Agent`) gives proper handoff type inference.
const triageAgent = agentsSdk.Agent.create({
  name: "TriageAgent",
  handoffs: [spanishAgent, frenchAgent],
  instructions: `You are a helpful assistant that routes translation requests to the appropriate translator.

- If the user wants to translate something to Spanish, hand off to the SpanishTranslator.
- If the user wants to translate something to French, hand off to the FrenchTranslator.
- For any other request, respond directly.`,
});

/**
 * Entry point: instruments the SDK, exercises both handoff paths (Spanish
 * and French) through the triage agent, then flushes and shuts down the
 * tracer provider.
 *
 * Fix: flush/shutdown now live in a `finally` block so spans recorded before
 * an unexpected failure are still exported and the provider is always shut
 * down cleanly.
 */
async function main() {
  // Instrument using the SDK module from our static import
  instrumentation.instrument(agentsSdk);

  console.log("Running handoffs example...\n");

  try {
    // Test Spanish handoff
    console.log("Request: \"Translate 'Hello, how are you?' to Spanish\"\n");
    const spanishResult = await agentsSdk.run(
      triageAgent,
      "Translate 'Hello, how are you?' to Spanish",
    );
    console.log("Spanish Translation:", spanishResult.finalOutput);
    console.log("\n---\n");

    // Test French handoff
    console.log("Request: \"Translate 'Good morning!' to French\"\n");
    const frenchResult = await agentsSdk.run(
      triageAgent,
      "Translate 'Good morning!' to French",
    );
    console.log("French Translation:", frenchResult.finalOutput);
  } catch (error) {
    console.error("Error running agent:", error);
  } finally {
    // Force flush spans to ensure they are exported
    await provider.forceFlush();

    // Give time for spans to be exported
    await new Promise((resolve) => setTimeout(resolve, 2000));

    // Shutdown provider
    await provider.shutdown();
  }
}

main().catch(console.error);
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
/* eslint-disable no-console */
import { SEMRESATTRS_PROJECT_NAME } from "@arizeai/openinference-semantic-conventions";

import { diag, DiagConsoleLogger, DiagLogLevel } from "@opentelemetry/api";
import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-proto";
import { Resource } from "@opentelemetry/resources";
import { ConsoleSpanExporter } from "@opentelemetry/sdk-trace-base";
import {
NodeTracerProvider,
SimpleSpanProcessor,
} from "@opentelemetry/sdk-trace-node";

import { OpenAIAgentsInstrumentation } from "../src";

// For troubleshooting, set the log level to DiagLogLevel.DEBUG
diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.DEBUG);

// Spans are exported to two destinations: the console (for local
// inspection) and an OTLP collector listening on localhost:6006.
const consoleProcessor = new SimpleSpanProcessor(new ConsoleSpanExporter());
const otlpProcessor = new SimpleSpanProcessor(
  new OTLPTraceExporter({
    url: "http://localhost:6006/v1/traces",
  }),
);

const provider = new NodeTracerProvider({
  resource: new Resource({
    [SEMRESATTRS_PROJECT_NAME]: "openai-agents-service",
  }),
  spanProcessors: [consoleProcessor, otlpProcessor],
});

provider.register();

// Create the instrumentation instance, bound to the provider above.
const instrumentation = new OpenAIAgentsInstrumentation({
  tracerProvider: provider,
});

console.log("OpenInference instrumentation configured");

// Export for use in other files
export { instrumentation, provider };
Loading
Loading