(chat)
OpenAI's API chat completions v1 endpoint
- create - Create chat completions
- createStream
This function processes chat completion requests by determining whether to use streaming or non-streaming response handling based on the request payload. For streaming requests, it configures additional options to track token usage.
Returns a Response containing either:
- A streaming SSE connection for real-time completions
- A single JSON response for non-streaming completions
Returns an error status code if:
- The request processing fails
- The streaming/non-streaming handlers encounter errors
- The underlying inference service returns an error
import { AtomaSDK } from "atoma-sdk";

// Authenticate with a bearer token read from the environment.
const atomaSDK = new AtomaSDK({
  bearerAuth: process.env["ATOMASDK_BEARER_AUTH"] ?? "",
});

// Example: create a (non-streaming) chat completion.
async function run() {
  const result = await atomaSDK.chat.create({
    frequencyPenalty: 0,
    // `functions` is the legacy tool-definition field; `tools` below is the
    // current equivalent — both are shown here for completeness.
    functions: [
      {
        name: "get_current_weather",
        description: "Get the current weather in a location",
        parameters: {
          type: "object",
          properties: {
            location: {
              type: "string",
              description: "The location to get the weather for",
            },
          },
          required: ["location"],
        },
      },
    ],
    // Bias specific token IDs up/down during sampling.
    logitBias: {
      "1234567890": 0.5,
      "1234567891": -0.5,
    },
    maxCompletionTokens: 4096,
    messages: [
      {
        content: "You are a helpful AI assistant",
        name: "AI expert",
        role: "system",
      },
      {
        content: "Hello!",
        name: "John Doe",
        role: "user",
      },
      {
        content: "I'm here to help you with any questions you have. How can I assist you today?",
        name: "AI",
        role: "assistant",
      },
    ],
    model: "meta-llama/Llama-3.3-70B-Instruct",
    n: 1,
    parallelToolCalls: true,
    presencePenalty: 0,
    seed: 123,
    serviceTier: "auto",
    // Generation halts as soon as any of these sequences is produced.
    stop: ["stop", "halt"],
    temperature: 0.7,
    tools: [
      {
        function: {
          description: "Get the current weather in a location",
          name: "get_current_weather",
          parameters: {
            type: "object",
            properties: {
              location: {
                type: "string",
                description: "The location to get the weather for",
              },
            },
            required: ["location"],
          },
        },
        type: "function",
      },
    ],
    topLogprobs: 1,
    topP: 1,
    user: "user-1234",
  });

  // Handle the result
  console.log(result);
}
run();

The standalone function version of this method:
import { AtomaSDKCore } from "atoma-sdk/core.js";
import { chatCreate } from "atoma-sdk/funcs/chatCreate.js";

// Use `AtomaSDKCore` for best tree-shaking performance.
// You can create one instance of it to use across an application.
const atomaSDK = new AtomaSDKCore({
  bearerAuth: process.env["ATOMASDK_BEARER_AUTH"] ?? "",
});

// Example: standalone-function form of `chat.create`.
async function run() {
  const res = await chatCreate(atomaSDK, {
    frequencyPenalty: 0,
    // `functions` is the legacy tool-definition field; `tools` below is the
    // current equivalent — both are shown here for completeness.
    functions: [
      {
        name: "get_current_weather",
        description: "Get the current weather in a location",
        parameters: {
          type: "object",
          properties: {
            location: {
              type: "string",
              description: "The location to get the weather for",
            },
          },
          required: ["location"],
        },
      },
    ],
    // Bias specific token IDs up/down during sampling.
    logitBias: {
      "1234567890": 0.5,
      "1234567891": -0.5,
    },
    maxCompletionTokens: 4096,
    messages: [
      {
        content: "You are a helpful AI assistant",
        name: "AI expert",
        role: "system",
      },
      {
        content: "Hello!",
        name: "John Doe",
        role: "user",
      },
      {
        content: "I'm here to help you with any questions you have. How can I assist you today?",
        name: "AI",
        role: "assistant",
      },
    ],
    model: "meta-llama/Llama-3.3-70B-Instruct",
    n: 1,
    parallelToolCalls: true,
    presencePenalty: 0,
    seed: 123,
    serviceTier: "auto",
    // Generation halts as soon as any of these sequences is produced.
    stop: ["stop", "halt"],
    temperature: 0.7,
    tools: [
      {
        function: {
          description: "Get the current weather in a location",
          name: "get_current_weather",
          parameters: {
            type: "object",
            properties: {
              location: {
                type: "string",
                description: "The location to get the weather for",
              },
            },
            required: ["location"],
          },
        },
        type: "function",
      },
    ],
    topLogprobs: 1,
    topP: 1,
    user: "user-1234",
  });

  // Standalone functions return a result object instead of throwing.
  if (!res.ok) {
    throw res.error;
  }

  const { value: result } = res;

  // Handle the result
  console.log(result);
}
run();

| Parameter | Type | Required | Description |
|---|---|---|---|
| request | components.CreateChatCompletionRequest | ✔️ | The request object to use for the request. |
| options | RequestOptions | ➖ | Used to set various options for making HTTP requests. |
| options.fetchOptions | RequestInit | ➖ | Options that are passed to the underlying HTTP request. This can be used to inject extra headers, for example. All Request options, except method and body, are allowed. |
| options.retries | RetryConfig | ➖ | Enables retrying HTTP requests under certain failure conditions. |
Promise<components.ChatCompletionResponse>
| Error Type | Status Code | Content Type |
|---|---|---|
| errors.APIError | 4XX, 5XX | */* |
import { AtomaSDK } from "atoma-sdk";

// Authenticate with a bearer token read from the environment.
const atomaSDK = new AtomaSDK({
  bearerAuth: process.env["ATOMASDK_BEARER_AUTH"] ?? "",
});

// Example: create a streaming chat completion and consume the SSE events.
async function run() {
  const result = await atomaSDK.chat.createStream({
    frequencyPenalty: 0,
    // `functions` is the legacy tool-definition field; `tools` below is the
    // current equivalent — both are shown here for completeness.
    functions: [
      {
        name: "get_current_weather",
        description: "Get the current weather in a location",
        parameters: {
          type: "object",
          properties: {
            location: {
              type: "string",
              description: "The location to get the weather for",
            },
          },
          required: ["location"],
        },
      },
    ],
    // Bias specific token IDs up/down during sampling.
    logitBias: {
      "1234567890": 0.5,
      "1234567891": -0.5,
    },
    maxCompletionTokens: 4096,
    messages: [
      {
        content: "You are a helpful AI assistant",
        name: "AI expert",
        role: "system",
      },
      {
        content: "Hello!",
        name: "John Doe",
        role: "user",
      },
      {
        content: "I'm here to help you with any questions you have. How can I assist you today?",
        name: "AI",
        role: "assistant",
      },
    ],
    model: "meta-llama/Llama-3.3-70B-Instruct",
    n: 1,
    parallelToolCalls: true,
    presencePenalty: 0,
    seed: 123,
    serviceTier: "auto",
    // Generation halts as soon as any of these sequences is produced.
    stop: ["stop", "halt"],
    temperature: 0.7,
    tools: [
      {
        function: {
          description: "Get the current weather in a location",
          name: "get_current_weather",
          parameters: {
            type: "object",
            properties: {
              location: {
                type: "string",
                description: "The location to get the weather for",
              },
            },
            required: ["location"],
          },
        },
        type: "function",
      },
    ],
    topLogprobs: 1,
    topP: 1,
    user: "user-1234",
  });

  // Iterate the event stream as completion chunks arrive.
  for await (const event of result) {
    // Handle the event
    console.log(event);
  }
}
run();

The standalone function version of this method:
import { AtomaSDKCore } from "atoma-sdk/core.js";
import { chatCreateStream } from "atoma-sdk/funcs/chatCreateStream.js";

// Use `AtomaSDKCore` for best tree-shaking performance.
// You can create one instance of it to use across an application.
const atomaSDK = new AtomaSDKCore({
  bearerAuth: process.env["ATOMASDK_BEARER_AUTH"] ?? "",
});

// Example: standalone-function form of `chat.createStream`.
async function run() {
  const res = await chatCreateStream(atomaSDK, {
    frequencyPenalty: 0,
    // `functions` is the legacy tool-definition field; `tools` below is the
    // current equivalent — both are shown here for completeness.
    functions: [
      {
        name: "get_current_weather",
        description: "Get the current weather in a location",
        parameters: {
          type: "object",
          properties: {
            location: {
              type: "string",
              description: "The location to get the weather for",
            },
          },
          required: ["location"],
        },
      },
    ],
    // Bias specific token IDs up/down during sampling.
    logitBias: {
      "1234567890": 0.5,
      "1234567891": -0.5,
    },
    maxCompletionTokens: 4096,
    messages: [
      {
        content: "You are a helpful AI assistant",
        name: "AI expert",
        role: "system",
      },
      {
        content: "Hello!",
        name: "John Doe",
        role: "user",
      },
      {
        content: "I'm here to help you with any questions you have. How can I assist you today?",
        name: "AI",
        role: "assistant",
      },
    ],
    model: "meta-llama/Llama-3.3-70B-Instruct",
    n: 1,
    parallelToolCalls: true,
    presencePenalty: 0,
    seed: 123,
    serviceTier: "auto",
    // Generation halts as soon as any of these sequences is produced.
    stop: ["stop", "halt"],
    temperature: 0.7,
    tools: [
      {
        function: {
          description: "Get the current weather in a location",
          name: "get_current_weather",
          parameters: {
            type: "object",
            properties: {
              location: {
                type: "string",
                description: "The location to get the weather for",
              },
            },
            required: ["location"],
          },
        },
        type: "function",
      },
    ],
    topLogprobs: 1,
    topP: 1,
    user: "user-1234",
  });

  // Standalone functions return a result object instead of throwing.
  if (!res.ok) {
    throw res.error;
  }

  const { value: result } = res;

  // Iterate the event stream as completion chunks arrive.
  for await (const event of result) {
    // Handle the event
    console.log(event);
  }
}
run();

| Parameter | Type | Required | Description |
|---|---|---|---|
request |
components.CreateChatCompletionStreamRequest | ✔️ | The request object to use for the request. |
options |
RequestOptions | ➖ | Used to set various options for making HTTP requests. |
options.fetchOptions |
RequestInit | ➖ | Options that are passed to the underlying HTTP request. This can be used to inject extra headers for examples. All Request options, except method and body, are allowed. |
options.retries |
RetryConfig | ➖ | Enables retrying HTTP requests under certain failure conditions. |
Promise<EventStream<operations.ChatCompletionsCreateStreamResponseBody>>
| Error Type | Status Code | Content Type |
|---|---|---|
| errors.APIError | 4XX, 5XX | */* |