# Inference Gateway Python SDK
A modern Python SDK for interacting with the Inference Gateway, providing a unified interface to multiple AI providers.

## Features

- 🔗 Unified interface for multiple AI providers (OpenAI, Anthropic, Ollama, etc.)
- 🛡️ Type-safe operations using Pydantic models
- ⚡ Support for both synchronous and streaming responses
- 🚨 Built-in error handling and validation
- 🔄 Proxy requests directly to provider APIs
## Installation

```bash
pip install inference-gateway
```
## Quick Start

```python
from inference_gateway import InferenceGatewayClient, Message

# Initialize client
client = InferenceGatewayClient("http://localhost:8080/v1")

# Simple chat completion
response = client.create_chat_completion(
    model="openai/gpt-4",
    messages=[
        Message(role="system", content="You are a helpful assistant"),
        Message(role="user", content="Hello!"),
    ],
)

print(response.choices[0].message.content)
```
## Requirements

- Python 3.8+
- `requests` or `httpx` (for the HTTP client)
- `pydantic` (for data validation)
## Client Configuration

```python
from inference_gateway import InferenceGatewayClient

# Basic configuration
client = InferenceGatewayClient("http://localhost:8080/v1")

# With authentication
client = InferenceGatewayClient(
    "http://localhost:8080/v1",
    token="your-api-token",
    timeout=60.0,  # Custom timeout
)

# Using httpx instead of requests
client = InferenceGatewayClient(
    "http://localhost:8080/v1",
    use_httpx=True,
)
```
## Listing Models

```python
# List all available models
models = client.list_models()
print("All models:", models)

# Filter by provider
openai_models = client.list_models(provider="openai")
print("OpenAI models:", openai_models)
```
## Chat Completions

```python
from inference_gateway import Message

response = client.create_chat_completion(
    model="openai/gpt-4",
    messages=[
        Message(role="system", content="You are a helpful assistant"),
        Message(role="user", content="Explain quantum computing"),
    ],
    max_tokens=500,
)

print(response.choices[0].message.content)
```
## Streaming Responses

```python
import json

from inference_gateway import Message
from inference_gateway.models import CreateChatCompletionStreamResponse
from pydantic import ValidationError

# Streaming returns SSEvent objects
for chunk in client.create_chat_completion_stream(
    model="ollama/llama2",
    messages=[Message(role="user", content="Tell me a story")],
):
    if chunk.data:
        try:
            # Parse the raw JSON data
            data = json.loads(chunk.data)

            # Unmarshal into a structured model for type safety
            try:
                structured_chunk = CreateChatCompletionStreamResponse.model_validate(data)

                # Use the structured model for better type safety and IDE support
                if structured_chunk.choices and len(structured_chunk.choices) > 0:
                    choice = structured_chunk.choices[0]
                    if hasattr(choice.delta, "content") and choice.delta.content:
                        print(choice.delta.content, end="", flush=True)
            except ValidationError:
                # Fall back to manual parsing for non-standard chunks
                if "choices" in data and len(data["choices"]) > 0:
                    delta = data["choices"][0].get("delta", {})
                    if "content" in delta and delta["content"]:
                        print(delta["content"], end="", flush=True)
        except json.JSONDecodeError:
            pass
```
## Proxying Requests

```python
# Proxy a request directly to OpenAI's API
response = client.proxy_request(
    provider="openai",
    path="/v1/models",
    method="GET",
)

print("OpenAI models:", response)
```
## Health Checking

```python
if client.health_check():
    print("API is healthy")
else:
    print("API is unavailable")
```
## Error Handling

The SDK provides several exception types:

```python
from inference_gateway import (
    InferenceGatewayAPIError,
    InferenceGatewayError,
    InferenceGatewayValidationError,
)

try:
    response = client.create_chat_completion(...)
except InferenceGatewayAPIError as e:
    print(f"API Error: {e} (Status: {e.status_code})")
    print("Response:", e.response_data)
except InferenceGatewayValidationError as e:
    print(f"Validation Error: {e}")
except InferenceGatewayError as e:
    print(f"General Error: {e}")
```
## Using Tools

```python
from inference_gateway.models import (
    ChatCompletionTool,
    FunctionObject,
    FunctionParameters,
)

# Define a weather tool using type-safe Pydantic models
weather_tool = ChatCompletionTool(
    type="function",
    function=FunctionObject(
        name="get_current_weather",
        description="Get the current weather in a given location",
        parameters=FunctionParameters(
            type="object",
            properties={
                "location": {
                    "type": "string",
                    "description": "The city and state, e.g. San Francisco, CA",
                },
                "unit": {
                    "type": "string",
                    "enum": ["celsius", "fahrenheit"],
                    "description": "The temperature unit to use",
                },
            },
            required=["location"],
        ),
    ),
)
```
```python
# Using tools in a chat completion
response = client.create_chat_completion(
    model="openai/gpt-4",
    messages=[
        Message(role="system", content="You are a helpful assistant with access to weather information"),
        Message(role="user", content="What is the weather like in New York?"),
    ],
    tools=[weather_tool],  # Pass the tool definition
)

print(response.choices[0].message.content)

# Check if the model made a tool call
if response.choices[0].message.tool_calls:
    for tool_call in response.choices[0].message.tool_calls:
        print(f"Tool called: {tool_call.function.name}")
        print(f"Arguments: {tool_call.function.arguments}")
```
## MCP Tools

```python
# List available MCP tools (requires MCP_ENABLE and MCP_EXPOSE to be set on the gateway)
tools = client.list_tools()
print("Available tools:", tools)
```
### Server-Side Tool Management
The SDK currently supports listing available MCP tools, which is particularly useful for UI applications that need to display connected tools to users. The key advantage is that tools are managed server-side:
- **Automatic Tool Injection**: Tools are automatically inferred and injected into requests by the Inference Gateway server
- **Simplified Client Code**: No need to manually manage or configure tools in your client application
- **Transparent Tool Calls**: During streaming chat completions with configured MCP servers, tool calls appear in the response stream - no special handling is required beyond optionally displaying them to users
This architecture allows you to focus on LLM interactions while the gateway handles all tool management complexities behind the scenes.
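For example, a streaming loop only needs to watch for `tool_calls` deltas alongside regular content. A minimal sketch, assuming the gateway runs with `MCP_ENABLE` and `MCP_EXPOSE` set and has an MCP server (e.g. a filesystem server) configured, reusing the manual chunk parsing shown earlier:

```python
import json

# No client-side tool setup: the gateway injects MCP tools automatically
for chunk in client.create_chat_completion_stream(
    model="openai/gpt-4",
    messages=[Message(role="user", content="What files are in the workspace?")],
):
    if not chunk.data:
        continue
    try:
        data = json.loads(chunk.data)
    except json.JSONDecodeError:
        continue  # e.g. keep-alive or end-of-stream sentinels
    for choice in data.get("choices", []):
        delta = choice.get("delta", {})
        if delta.get("tool_calls"):
            # Optionally surface the server-managed tool call to the user
            print(f"\n[tool call] {delta['tool_calls']}")
        elif delta.get("content"):
            print(delta["content"], end="", flush=True)
```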
## Advanced Configuration

```python
# With custom headers
client = InferenceGatewayClient(
    "http://localhost:8080/v1",
    headers={"X-Custom-Header": "value"},
)

# With proxy settings
client = InferenceGatewayClient(
    "http://localhost:8080/v1",
    proxies={"http": "http://proxy.example.com"},
)
```
## Examples

For comprehensive examples demonstrating various use cases, see the examples directory:
- List LLMs - How to list available models
- Chat - Basic and advanced chat completion examples
- Tools - Working with function tools
- MCP - Model Context Protocol integration examples
Each example includes a detailed README with setup instructions and explanations.
## License

This SDK is distributed under the MIT License; see LICENSE for more information.