|
| 1 | +import Foundation |
| 2 | + |
| 3 | +/// AI LLM endpoint params IBM object |
/// AI LLM endpoint params IBM object
public class AiLlmEndpointParamsIbm: Codable {
    private enum CodingKeys: String, CodingKey {
        case type
        case temperature
        case topP = "top_p"
        case topK = "top_k"
    }

    /// The type of the AI LLM endpoint params object for IBM.
    /// This parameter is **required**.
    public let type: AiLlmEndpointParamsIbmTypeField

    /// Sampling temperature in the range 0...1. Values near 1 (e.g. 0.8) make the
    /// output more random; values near 0 (e.g. 0.2) make it more focused and
    /// deterministic. Altering this together with `top_p` is not recommended —
    /// adjust one or the other.
    @CodableTriState public private(set) var temperature: Double?

    /// Nucleus-sampling alternative to temperature: the model considers only the
    /// tokens within the `top_p` probability mass, so 0.1 restricts it to the tokens
    /// comprising the top 10% of probability mass. Altering this together with
    /// temperature is not recommended — adjust one or the other.
    @CodableTriState public private(set) var topP: Double?

    /// `Top-K` controls how the model selects output tokens. A `top-K` of 1 means
    /// the next token is always the single most probable one in the model's
    /// vocabulary (greedy decoding), while a `top-K` of 3 means the next token is
    /// drawn from the three most probable tokens by using temperature.
    @CodableTriState public private(set) var topK: Double?

    /// Initializer for a AiLlmEndpointParamsIbm.
    ///
    /// - Parameters:
    ///   - type: The type of the AI LLM endpoint params object for IBM.
    ///     This parameter is **required**.
    ///   - temperature: Sampling temperature in the range 0...1. Values near 1
    ///     (e.g. 0.8) make the output more random; values near 0 (e.g. 0.2) make it
    ///     more focused and deterministic. Prefer altering this or `top_p`, not both.
    ///   - topP: Nucleus-sampling alternative to temperature: only tokens within the
    ///     `top_p` probability mass are considered (0.1 keeps the top 10% of
    ///     probability mass). Prefer altering this or temperature, not both.
    ///   - topK: `Top-K` controls token selection. A `top-K` of 1 is greedy decoding
    ///     (the single most probable token); a `top-K` of 3 selects from the three
    ///     most probable tokens by using temperature.
    public init(type: AiLlmEndpointParamsIbmTypeField = AiLlmEndpointParamsIbmTypeField.ibmParams, temperature: TriStateField<Double> = nil, topP: TriStateField<Double> = nil, topK: TriStateField<Double> = nil) {
        // Assignments are independent of one another; order is arbitrary.
        self.type = type
        self._topK = CodableTriState(state: topK)
        self._topP = CodableTriState(state: topP)
        self._temperature = CodableTriState(state: temperature)
    }

    required public init(from decoder: Decoder) throws {
        let values = try decoder.container(keyedBy: CodingKeys.self)
        self.type = try values.decode(AiLlmEndpointParamsIbmTypeField.self, forKey: .type)
        // Optional fields: an absent key leaves the tri-state wrapper at its default.
        self.temperature = try values.decodeIfPresent(Double.self, forKey: .temperature)
        self.topP = try values.decodeIfPresent(Double.self, forKey: .topP)
        self.topK = try values.decodeIfPresent(Double.self, forKey: .topK)
    }

    public func encode(to encoder: Encoder) throws {
        var values = encoder.container(keyedBy: CodingKeys.self)
        try values.encode(type, forKey: .type)
        // Tri-state encoding distinguishes "unset", "explicit null", and "value".
        try values.encode(field: _temperature.state, forKey: .temperature)
        try values.encode(field: _topP.state, forKey: .topP)
        try values.encode(field: _topK.state, forKey: .topK)
    }

}
0 commit comments