Skip to content

feat: add support for IBM models to AI API #522

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
May 8, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
74 changes: 61 additions & 13 deletions openapi.json
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
"url": "http://www.apache.org/licenses/LICENSE-2.0"
},
"version": "2024.0",
"x-box-commit-hash": "f5abf083c2"
"x-box-commit-hash": "6f7e141054"
},
"servers": [
{
Expand Down Expand Up @@ -25345,18 +25345,7 @@
"minimum": 1
},
"llm_endpoint_params": {
"description": "The parameters for the LLM endpoint specific to OpenAI / Google models.",
"oneOf": [
{
"$ref": "#/components/schemas/AiLlmEndpointParamsOpenAi"
},
{
"$ref": "#/components/schemas/AiLlmEndpointParamsGoogle"
},
{
"$ref": "#/components/schemas/AiLlmEndpointParamsAWS"
}
]
"$ref": "#/components/schemas/AiLlmEndpointParams"
}
},
"title": "AI agent basic text tool",
Expand Down Expand Up @@ -26006,6 +25995,25 @@
],
"title": "AI Item Ask"
},
"AiLlmEndpointParams": {
"description": "The parameters for the LLM endpoint specific to a model.",
"type": "object",
"oneOf": [
{
"$ref": "#/components/schemas/AiLlmEndpointParamsOpenAi"
},
{
"$ref": "#/components/schemas/AiLlmEndpointParamsGoogle"
},
{
"$ref": "#/components/schemas/AiLlmEndpointParamsAWS"
},
{
"$ref": "#/components/schemas/AiLlmEndpointParamsIBM"
}
],
"title": "AI LLM endpoint parameters"
},
"AiLlmEndpointParamsAWS": {
"description": "AI LLM endpoint params AWS object",
"type": "object",
Expand Down Expand Up @@ -26086,6 +26094,46 @@
"title": "AI LLM endpoint params Google",
"x-box-resource-id": "ai_llm_endpoint_params_google"
},
"AiLlmEndpointParamsIBM": {
"description": "AI LLM endpoint params IBM object",
"type": "object",
"properties": {
"type": {
"description": "The type of the AI LLM endpoint params object for IBM.\nThis parameter is **required**.",
"type": "string",
"example": "ibm_params",
"enum": [
"ibm_params"
],
"nullable": false
},
"temperature": {
"description": "What sampling temperature to use, between 0 and 1. Higher values like 0.8 will make the output more random, \nwhile lower values like 0.2 will make it more focused and deterministic. \nWe generally recommend altering this or `top_p` but not both.",
"type": "number",
"example": 0.5,
"nullable": true
},
"top_p": {
"description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results \nof the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability \nmass are considered. We generally recommend altering this or temperature but not both.",
"type": "number",
"example": 0.5,
"maximum": 1,
"minimum": 0.1,
"nullable": true
},
"top_k": {
"description": "`Top-K` changes how the model selects tokens for output. A `top-K` of 1 means the next selected token is\nthe most probable among all tokens in the model's vocabulary (also called greedy decoding),\nwhile a `top-K` of 3 means that the next token is selected from among the three most probable tokens by using temperature.",
"type": "number",
"example": 1,
"nullable": true
}
},
"required": [
"type"
],
"title": "AI LLM endpoint params IBM",
"x-box-resource-id": "ai_llm_endpoint_params_ibm"
},
"AiLlmEndpointParamsOpenAi": {
"description": "AI LLM endpoint params OpenAI object.",
"type": "object",
Expand Down
2 changes: 1 addition & 1 deletion openapi/openapi-v2025.0.json
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
"url": "http://www.apache.org/licenses/LICENSE-2.0"
},
"version": "2025.0",
"x-box-commit-hash": "f5abf083c2"
"x-box-commit-hash": "6f7e141054"
},
"servers": [
{
Expand Down
74 changes: 61 additions & 13 deletions openapi/openapi.json
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
"url": "http://www.apache.org/licenses/LICENSE-2.0"
},
"version": "2024.0",
"x-box-commit-hash": "f5abf083c2"
"x-box-commit-hash": "6f7e141054"
},
"servers": [
{
Expand Down Expand Up @@ -25345,18 +25345,7 @@
"minimum": 1
},
"llm_endpoint_params": {
"description": "The parameters for the LLM endpoint specific to OpenAI / Google models.",
"oneOf": [
{
"$ref": "#/components/schemas/AiLlmEndpointParamsOpenAi"
},
{
"$ref": "#/components/schemas/AiLlmEndpointParamsGoogle"
},
{
"$ref": "#/components/schemas/AiLlmEndpointParamsAWS"
}
]
"$ref": "#/components/schemas/AiLlmEndpointParams"
}
},
"title": "AI agent basic text tool",
Expand Down Expand Up @@ -26006,6 +25995,25 @@
],
"title": "AI Item Ask"
},
"AiLlmEndpointParams": {
"description": "The parameters for the LLM endpoint specific to a model.",
"type": "object",
"oneOf": [
{
"$ref": "#/components/schemas/AiLlmEndpointParamsOpenAi"
},
{
"$ref": "#/components/schemas/AiLlmEndpointParamsGoogle"
},
{
"$ref": "#/components/schemas/AiLlmEndpointParamsAWS"
},
{
"$ref": "#/components/schemas/AiLlmEndpointParamsIBM"
}
],
"title": "AI LLM endpoint parameters"
},
"AiLlmEndpointParamsAWS": {
"description": "AI LLM endpoint params AWS object",
"type": "object",
Expand Down Expand Up @@ -26086,6 +26094,46 @@
"title": "AI LLM endpoint params Google",
"x-box-resource-id": "ai_llm_endpoint_params_google"
},
"AiLlmEndpointParamsIBM": {
"description": "AI LLM endpoint params IBM object",
"type": "object",
"properties": {
"type": {
"description": "The type of the AI LLM endpoint params object for IBM.\nThis parameter is **required**.",
"type": "string",
"example": "ibm_params",
"enum": [
"ibm_params"
],
"nullable": false
},
"temperature": {
"description": "What sampling temperature to use, between 0 and 1. Higher values like 0.8 will make the output more random, \nwhile lower values like 0.2 will make it more focused and deterministic. \nWe generally recommend altering this or `top_p` but not both.",
"type": "number",
"example": 0.5,
"nullable": true
},
"top_p": {
"description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results \nof the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability \nmass are considered. We generally recommend altering this or temperature but not both.",
"type": "number",
"example": 0.5,
"maximum": 1,
"minimum": 0.1,
"nullable": true
},
"top_k": {
"description": "`Top-K` changes how the model selects tokens for output. A `top-K` of 1 means the next selected token is\nthe most probable among all tokens in the model's vocabulary (also called greedy decoding),\nwhile a `top-K` of 3 means that the next token is selected from among the three most probable tokens by using temperature.",
"type": "number",
"example": 1,
"nullable": true
}
},
"required": [
"type"
],
"title": "AI LLM endpoint params IBM",
"x-box-resource-id": "ai_llm_endpoint_params_ibm"
},
"AiLlmEndpointParamsOpenAi": {
"description": "AI LLM endpoint params OpenAI object.",
"type": "object",
Expand Down
Loading