|
19 | 19 |
|
20 | 20 |
|
class ReasoningMode(ProtoEnumBase, Enum):
    """Enumeration of the reasoning modes a model configuration may select.

    Each member mirrors the corresponding value of the underlying proto
    enum (imported here as ``_m``), so the Python-side names stay in sync
    with the wire representation.
    """

    #: the reasoning mode has not been set explicitly
    REASONING_MODE_UNSPECIFIED = _m.REASONING_MODE_UNSPECIFIED
    #: reasoning is turned off
    DISABLED = _m.DISABLED
    #: reasoning is performed but its trace is not exposed in the output
    ENABLED_HIDDEN = _m.ENABLED_HIDDEN
25 | 33 |
|
#: any accepted representation of a reasoning mode: the raw proto int,
#: the mode's string name, or a :class:`ReasoningMode` member
ReasoningModeType = Union[int, str, ReasoningMode]
#: alias for the tool type accepted by completion requests
CompletionTool: TypeAlias = FunctionTool
29 | 38 |
|
30 | 39 |
|
@dataclass(frozen=True)
class GPTModelConfig(BaseModelConfig):
    """Immutable configuration for the GPT model.

    Bundles the generation and tool-usage settings passed to the model.
    Every field is optional; ``None`` means "use the server-side default".
    """

    #: sampling temperature — higher values give more random results;
    #: a double between 0 (inclusive) and 1 (inclusive)
    temperature: float | None = None
    #: maximum number of tokens to generate in the response
    max_tokens: int | None = None
    #: reasoning mode applied during generation, letting the model perform
    #: internal reasoning before producing its answer
    reasoning_mode: ReasoningModeType | None = None
    #: format of the model's response — a JsonSchema, a JSON string,
    #: or a pydantic model
    response_format: ResponseType | None = None
    #: tool(s) available during completion: a single tool or a sequence
    tools: Sequence[CompletionTool] | CompletionTool | None = None
    #: whether tools may be called in parallel during completion;
    #: defaults to 'true' on the server side
    parallel_tool_calls: bool | None = None
    #: tool-selection strategy — force any tool call, force a specific
    #: tool, or forbid tool calls entirely
    tool_choice: ToolChoiceType | None = None
0 commit comments