Skip to content

Commit 0717222

Browse files
feat: add support of IBM models to AI API (box/box-openapi#522) (#436)
1 parent af1dc12 commit 0717222

File tree

17 files changed

+197
-63
lines changed

17 files changed

+197
-63
lines changed

.codegen.json

+1-1
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
{ "engineHash": "c2a365c", "specHash": "6a332e7", "version": "0.6.1" }
1+
{ "engineHash": "c2a365c", "specHash": "4e677e3", "version": "0.6.1" }

BoxSdkGen.xcodeproj/project.pbxproj

+52-20
Large diffs are not rendered by default.

Sources/Schemas/AiAgentBasicGenTool/AiAgentBasicGenTool.swift

+2-2
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ public class AiAgentBasicGenTool: AiAgentLongTextToolTextGen {
1515
/// - Parameters:
1616
/// - model: The model used for the AI agent for basic text. For specific model values, see the [available models list](g://box-ai/supported-models).
1717
/// - numTokensForCompletion: The number of tokens for completion.
18-
/// - llmEndpointParams: The parameters for the LLM endpoint specific to OpenAI / Google models.
18+
/// - llmEndpointParams:
1919
/// - systemMessage: System messages aim at helping the LLM understand its role and what it is supposed to do.
2020
/// The input for `{current_date}` is optional, depending on the use.
2121
/// - promptTemplate: The prompt template contains contextual information of the request and the user prompt.
@@ -25,7 +25,7 @@ public class AiAgentBasicGenTool: AiAgentLongTextToolTextGen {
2525
/// - embeddings:
2626
/// - contentTemplate: How the content should be included in a request to the LLM.
2727
/// Input for `{content}` is optional, depending on the use.
28-
public init(model: String? = nil, numTokensForCompletion: Int64? = nil, llmEndpointParams: AiLlmEndpointParamsAwsOrAiLlmEndpointParamsGoogleOrAiLlmEndpointParamsOpenAi? = nil, systemMessage: String? = nil, promptTemplate: String? = nil, embeddings: AiAgentLongTextToolTextGenEmbeddingsField? = nil, contentTemplate: String? = nil) {
28+
public init(model: String? = nil, numTokensForCompletion: Int64? = nil, llmEndpointParams: AiLlmEndpointParams? = nil, systemMessage: String? = nil, promptTemplate: String? = nil, embeddings: AiAgentLongTextToolTextGenEmbeddingsField? = nil, contentTemplate: String? = nil) {
2929
self.contentTemplate = contentTemplate
3030

3131
super.init(model: model, numTokensForCompletion: numTokensForCompletion, llmEndpointParams: llmEndpointParams, systemMessage: systemMessage, promptTemplate: promptTemplate, embeddings: embeddings)

Sources/Schemas/AiAgentBasicTextTool/AiAgentBasicTextTool.swift

+2-2
Original file line numberDiff line numberDiff line change
@@ -20,12 +20,12 @@ public class AiAgentBasicTextTool: AiAgentBasicTextToolBase {
2020
/// - Parameters:
2121
/// - model: The model used for the AI agent for basic text. For specific model values, see the [available models list](g://box-ai/supported-models).
2222
/// - numTokensForCompletion: The number of tokens for completion.
23-
/// - llmEndpointParams: The parameters for the LLM endpoint specific to OpenAI / Google models.
23+
/// - llmEndpointParams:
2424
/// - systemMessage: System messages try to help the LLM "understand" its role and what it is supposed to do.
2525
/// - promptTemplate: The prompt template contains contextual information of the request and the user prompt.
2626
/// When passing `prompt_template` parameters, you **must include** inputs for `{user_question}` and `{content}`.
2727
/// `{current_date}` is optional, depending on the use.
28-
public init(model: String? = nil, numTokensForCompletion: Int64? = nil, llmEndpointParams: AiLlmEndpointParamsAwsOrAiLlmEndpointParamsGoogleOrAiLlmEndpointParamsOpenAi? = nil, systemMessage: String? = nil, promptTemplate: String? = nil) {
28+
public init(model: String? = nil, numTokensForCompletion: Int64? = nil, llmEndpointParams: AiLlmEndpointParams? = nil, systemMessage: String? = nil, promptTemplate: String? = nil) {
2929
self.systemMessage = systemMessage
3030
self.promptTemplate = promptTemplate
3131

Sources/Schemas/AiAgentBasicTextToolBase/AiAgentBasicTextToolBase.swift

+4-5
Original file line numberDiff line numberDiff line change
@@ -14,16 +14,15 @@ public class AiAgentBasicTextToolBase: Codable {
1414
/// The number of tokens for completion.
1515
public let numTokensForCompletion: Int64?
1616

17-
/// The parameters for the LLM endpoint specific to OpenAI / Google models.
18-
public let llmEndpointParams: AiLlmEndpointParamsAwsOrAiLlmEndpointParamsGoogleOrAiLlmEndpointParamsOpenAi?
17+
public let llmEndpointParams: AiLlmEndpointParams?
1918

2019
/// Initializer for a AiAgentBasicTextToolBase.
2120
///
2221
/// - Parameters:
2322
/// - model: The model used for the AI agent for basic text. For specific model values, see the [available models list](g://box-ai/supported-models).
2423
/// - numTokensForCompletion: The number of tokens for completion.
25-
/// - llmEndpointParams: The parameters for the LLM endpoint specific to OpenAI / Google models.
26-
public init(model: String? = nil, numTokensForCompletion: Int64? = nil, llmEndpointParams: AiLlmEndpointParamsAwsOrAiLlmEndpointParamsGoogleOrAiLlmEndpointParamsOpenAi? = nil) {
24+
/// - llmEndpointParams:
25+
public init(model: String? = nil, numTokensForCompletion: Int64? = nil, llmEndpointParams: AiLlmEndpointParams? = nil) {
2726
self.model = model
2827
self.numTokensForCompletion = numTokensForCompletion
2928
self.llmEndpointParams = llmEndpointParams
@@ -33,7 +32,7 @@ public class AiAgentBasicTextToolBase: Codable {
3332
let container = try decoder.container(keyedBy: CodingKeys.self)
3433
model = try container.decodeIfPresent(String.self, forKey: .model)
3534
numTokensForCompletion = try container.decodeIfPresent(Int64.self, forKey: .numTokensForCompletion)
36-
llmEndpointParams = try container.decodeIfPresent(AiLlmEndpointParamsAwsOrAiLlmEndpointParamsGoogleOrAiLlmEndpointParamsOpenAi.self, forKey: .llmEndpointParams)
35+
llmEndpointParams = try container.decodeIfPresent(AiLlmEndpointParams.self, forKey: .llmEndpointParams)
3736
}
3837

3938
public func encode(to encoder: Encoder) throws {

Sources/Schemas/AiAgentBasicTextToolTextGen/AiAgentBasicTextToolTextGen.swift

+2-2
Original file line numberDiff line numberDiff line change
@@ -22,14 +22,14 @@ public class AiAgentBasicTextToolTextGen: AiAgentBasicTextToolBase {
2222
/// - Parameters:
2323
/// - model: The model used for the AI agent for basic text. For specific model values, see the [available models list](g://box-ai/supported-models).
2424
/// - numTokensForCompletion: The number of tokens for completion.
25-
/// - llmEndpointParams: The parameters for the LLM endpoint specific to OpenAI / Google models.
25+
/// - llmEndpointParams:
2626
/// - systemMessage: System messages aim at helping the LLM understand its role and what it is supposed to do.
2727
/// The input for `{current_date}` is optional, depending on the use.
2828
/// - promptTemplate: The prompt template contains contextual information of the request and the user prompt.
2929
///
3030
/// When using the `prompt_template` parameter, you **must include** input for `{user_question}`.
3131
/// Inputs for `{current_date}` and `{content}` are optional, depending on the use.
32-
public init(model: String? = nil, numTokensForCompletion: Int64? = nil, llmEndpointParams: AiLlmEndpointParamsAwsOrAiLlmEndpointParamsGoogleOrAiLlmEndpointParamsOpenAi? = nil, systemMessage: String? = nil, promptTemplate: String? = nil) {
32+
public init(model: String? = nil, numTokensForCompletion: Int64? = nil, llmEndpointParams: AiLlmEndpointParams? = nil, systemMessage: String? = nil, promptTemplate: String? = nil) {
3333
self.systemMessage = systemMessage
3434
self.promptTemplate = promptTemplate
3535

Sources/Schemas/AiAgentLongTextTool/AiAgentLongTextTool.swift

+2-2
Original file line numberDiff line numberDiff line change
@@ -13,13 +13,13 @@ public class AiAgentLongTextTool: AiAgentBasicTextTool {
1313
/// - Parameters:
1414
/// - model: The model used for the AI agent for basic text. For specific model values, see the [available models list](g://box-ai/supported-models).
1515
/// - numTokensForCompletion: The number of tokens for completion.
16-
/// - llmEndpointParams: The parameters for the LLM endpoint specific to OpenAI / Google models.
16+
/// - llmEndpointParams:
1717
/// - systemMessage: System messages try to help the LLM "understand" its role and what it is supposed to do.
1818
/// - promptTemplate: The prompt template contains contextual information of the request and the user prompt.
1919
/// When passing `prompt_template` parameters, you **must include** inputs for `{user_question}` and `{content}`.
2020
/// `{current_date}` is optional, depending on the use.
2121
/// - embeddings:
22-
public init(model: String? = nil, numTokensForCompletion: Int64? = nil, llmEndpointParams: AiLlmEndpointParamsAwsOrAiLlmEndpointParamsGoogleOrAiLlmEndpointParamsOpenAi? = nil, systemMessage: String? = nil, promptTemplate: String? = nil, embeddings: AiAgentLongTextToolEmbeddingsField? = nil) {
22+
public init(model: String? = nil, numTokensForCompletion: Int64? = nil, llmEndpointParams: AiLlmEndpointParams? = nil, systemMessage: String? = nil, promptTemplate: String? = nil, embeddings: AiAgentLongTextToolEmbeddingsField? = nil) {
2323
self.embeddings = embeddings
2424

2525
super.init(model: model, numTokensForCompletion: numTokensForCompletion, llmEndpointParams: llmEndpointParams, systemMessage: systemMessage, promptTemplate: promptTemplate)

Sources/Schemas/AiAgentLongTextToolTextGen/AiAgentLongTextToolTextGen.swift

+2-2
Original file line numberDiff line numberDiff line change
@@ -13,15 +13,15 @@ public class AiAgentLongTextToolTextGen: AiAgentBasicTextToolTextGen {
1313
/// - Parameters:
1414
/// - model: The model used for the AI agent for basic text. For specific model values, see the [available models list](g://box-ai/supported-models).
1515
/// - numTokensForCompletion: The number of tokens for completion.
16-
/// - llmEndpointParams: The parameters for the LLM endpoint specific to OpenAI / Google models.
16+
/// - llmEndpointParams:
1717
/// - systemMessage: System messages aim at helping the LLM understand its role and what it is supposed to do.
1818
/// The input for `{current_date}` is optional, depending on the use.
1919
/// - promptTemplate: The prompt template contains contextual information of the request and the user prompt.
2020
///
2121
/// When using the `prompt_template` parameter, you **must include** input for `{user_question}`.
2222
/// Inputs for `{current_date}` and `{content}` are optional, depending on the use.
2323
/// - embeddings:
24-
public init(model: String? = nil, numTokensForCompletion: Int64? = nil, llmEndpointParams: AiLlmEndpointParamsAwsOrAiLlmEndpointParamsGoogleOrAiLlmEndpointParamsOpenAi? = nil, systemMessage: String? = nil, promptTemplate: String? = nil, embeddings: AiAgentLongTextToolTextGenEmbeddingsField? = nil) {
24+
public init(model: String? = nil, numTokensForCompletion: Int64? = nil, llmEndpointParams: AiLlmEndpointParams? = nil, systemMessage: String? = nil, promptTemplate: String? = nil, embeddings: AiAgentLongTextToolTextGenEmbeddingsField? = nil) {
2525
self.embeddings = embeddings
2626

2727
super.init(model: model, numTokensForCompletion: numTokensForCompletion, llmEndpointParams: llmEndpointParams, systemMessage: systemMessage, promptTemplate: promptTemplate)
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,11 @@
11
import Foundation
22

3-
public enum AiLlmEndpointParamsAwsOrAiLlmEndpointParamsGoogleOrAiLlmEndpointParamsOpenAi: Codable {
4-
case aiLlmEndpointParamsAws(AiLlmEndpointParamsAws)
5-
case aiLlmEndpointParamsGoogle(AiLlmEndpointParamsGoogle)
3+
/// The parameters for the LLM endpoint specific to a model.
4+
public enum AiLlmEndpointParams: Codable {
65
case aiLlmEndpointParamsOpenAi(AiLlmEndpointParamsOpenAi)
6+
case aiLlmEndpointParamsGoogle(AiLlmEndpointParamsGoogle)
7+
case aiLlmEndpointParamsAws(AiLlmEndpointParamsAws)
8+
case aiLlmEndpointParamsIbm(AiLlmEndpointParamsIbm)
79

810
private enum DiscriminatorCodingKey: String, CodingKey {
911
case type
@@ -13,9 +15,9 @@ public enum AiLlmEndpointParamsAwsOrAiLlmEndpointParamsGoogleOrAiLlmEndpointPara
1315
if let container = try? decoder.container(keyedBy: DiscriminatorCodingKey.self) {
1416
if let discriminator_0 = try? container.decode(String.self, forKey: .type) {
1517
switch discriminator_0 {
16-
case "aws_params":
17-
if let content = try? AiLlmEndpointParamsAws(from: decoder) {
18-
self = .aiLlmEndpointParamsAws(content)
18+
case "openai_params":
19+
if let content = try? AiLlmEndpointParamsOpenAi(from: decoder) {
20+
self = .aiLlmEndpointParamsOpenAi(content)
1921
return
2022
}
2123

@@ -25,32 +27,40 @@ public enum AiLlmEndpointParamsAwsOrAiLlmEndpointParamsGoogleOrAiLlmEndpointPara
2527
return
2628
}
2729

28-
case "openai_params":
29-
if let content = try? AiLlmEndpointParamsOpenAi(from: decoder) {
30-
self = .aiLlmEndpointParamsOpenAi(content)
30+
case "aws_params":
31+
if let content = try? AiLlmEndpointParamsAws(from: decoder) {
32+
self = .aiLlmEndpointParamsAws(content)
33+
return
34+
}
35+
36+
case "ibm_params":
37+
if let content = try? AiLlmEndpointParamsIbm(from: decoder) {
38+
self = .aiLlmEndpointParamsIbm(content)
3139
return
3240
}
3341

3442
default:
35-
throw DecodingError.typeMismatch(AiLlmEndpointParamsAwsOrAiLlmEndpointParamsGoogleOrAiLlmEndpointParamsOpenAi.self, DecodingError.Context(codingPath: decoder.codingPath, debugDescription: "The Decoded object contains an unexpected value for key type"))
43+
throw DecodingError.typeMismatch(AiLlmEndpointParams.self, DecodingError.Context(codingPath: decoder.codingPath, debugDescription: "The Decoded object contains an unexpected value for key type"))
3644

3745
}
3846
}
3947

4048
}
4149

42-
throw DecodingError.typeMismatch(AiLlmEndpointParamsAwsOrAiLlmEndpointParamsGoogleOrAiLlmEndpointParamsOpenAi.self, DecodingError.Context(codingPath: decoder.codingPath, debugDescription: "The type of the decoded object cannot be determined."))
50+
throw DecodingError.typeMismatch(AiLlmEndpointParams.self, DecodingError.Context(codingPath: decoder.codingPath, debugDescription: "The type of the decoded object cannot be determined."))
4351

4452
}
4553

4654
public func encode(to encoder: Encoder) throws {
4755
switch self {
48-
case .aiLlmEndpointParamsAws(let aiLlmEndpointParamsAws):
49-
try aiLlmEndpointParamsAws.encode(to: encoder)
50-
case .aiLlmEndpointParamsGoogle(let aiLlmEndpointParamsGoogle):
51-
try aiLlmEndpointParamsGoogle.encode(to: encoder)
5256
case .aiLlmEndpointParamsOpenAi(let aiLlmEndpointParamsOpenAi):
5357
try aiLlmEndpointParamsOpenAi.encode(to: encoder)
58+
case .aiLlmEndpointParamsGoogle(let aiLlmEndpointParamsGoogle):
59+
try aiLlmEndpointParamsGoogle.encode(to: encoder)
60+
case .aiLlmEndpointParamsAws(let aiLlmEndpointParamsAws):
61+
try aiLlmEndpointParamsAws.encode(to: encoder)
62+
case .aiLlmEndpointParamsIbm(let aiLlmEndpointParamsIbm):
63+
try aiLlmEndpointParamsIbm.encode(to: encoder)
5464
}
5565
}
5666

Original file line numberDiff line numberDiff line change
@@ -0,0 +1,68 @@
import Foundation

/// AI LLM endpoint params IBM object
public class AiLlmEndpointParamsIbm: Codable {
    private enum CodingKeys: String, CodingKey {
        case type
        case temperature
        case topP = "top_p"
        case topK = "top_k"
    }

    /// The type of the AI LLM endpoint params object for IBM.
    /// This parameter is **required**.
    public let type: AiLlmEndpointParamsIbmTypeField

    /// What sampling temperature to use, between 0 and 1. Higher values like 0.8 will make the output more random,
    /// while lower values like 0.2 will make it more focused and deterministic.
    /// We generally recommend altering this or `top_p` but not both.
    @CodableTriState public private(set) var temperature: Double?

    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results
    /// of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability
    /// mass are considered. We generally recommend altering this or temperature but not both.
    @CodableTriState public private(set) var topP: Double?

    /// `Top-K` changes how the model selects tokens for output. A `top-K` of 1 means the next selected token is
    /// the most probable among all tokens in the model's vocabulary (also called greedy decoding),
    /// while a `top-K` of 3 means that the next token is selected from among the three most probable tokens by using temperature.
    @CodableTriState public private(set) var topK: Double?

    /// Initializer for a AiLlmEndpointParamsIbm.
    ///
    /// - Parameters:
    ///   - type: The type of the AI LLM endpoint params object for IBM.
    ///     This parameter is **required**.
    ///   - temperature: What sampling temperature to use, between 0 and 1. Higher values like 0.8 will make the output more random,
    ///     while lower values like 0.2 will make it more focused and deterministic.
    ///     We generally recommend altering this or `top_p` but not both.
    ///   - topP: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results
    ///     of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability
    ///     mass are considered. We generally recommend altering this or temperature but not both.
    ///   - topK: `Top-K` changes how the model selects tokens for output. A `top-K` of 1 means the next selected token is
    ///     the most probable among all tokens in the model's vocabulary (also called greedy decoding),
    ///     while a `top-K` of 3 means that the next token is selected from among the three most probable tokens by using temperature.
    public init(type: AiLlmEndpointParamsIbmTypeField = AiLlmEndpointParamsIbmTypeField.ibmParams, temperature: TriStateField<Double> = nil, topP: TriStateField<Double> = nil, topK: TriStateField<Double> = nil) {
        self.type = type
        self._temperature = CodableTriState(state: temperature)
        self._topP = CodableTriState(state: topP)
        self._topK = CodableTriState(state: topK)
    }

    required public init(from decoder: Decoder) throws {
        // Decode from the keyed container; optional fields default to nil when absent.
        let values = try decoder.container(keyedBy: CodingKeys.self)
        type = try values.decode(AiLlmEndpointParamsIbmTypeField.self, forKey: .type)
        temperature = try values.decodeIfPresent(Double.self, forKey: .temperature)
        topP = try values.decodeIfPresent(Double.self, forKey: .topP)
        topK = try values.decodeIfPresent(Double.self, forKey: .topK)
    }

    public func encode(to encoder: Encoder) throws {
        // Tri-state fields are encoded via their wrapper state so an explicit
        // null can be distinguished from an omitted key.
        var output = encoder.container(keyedBy: CodingKeys.self)
        try output.encode(type, forKey: .type)
        try output.encode(field: _temperature.state, forKey: .temperature)
        try output.encode(field: _topP.state, forKey: .topP)
        try output.encode(field: _topK.state, forKey: .topK)
    }

}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
import Foundation

/// Discriminator value for the IBM LLM endpoint params object.
/// Unrecognized raw strings are preserved via `customValue` rather than failing.
public enum AiLlmEndpointParamsIbmTypeField: CodableStringEnum {
    case ibmParams
    case customValue(String)

    public init(rawValue raw: String) {
        // Case-insensitive match on the wire value.
        if raw.lowercased() == "ibm_params" {
            self = .ibmParams
        } else {
            self = .customValue(raw)
        }
    }

    public var rawValue: String {
        switch self {
        case .ibmParams:
            return "ibm_params"
        case .customValue(let raw):
            return raw
        }
    }

}

0 commit comments

Comments
 (0)