Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
7d3dc17
Add docstrings for _models/completions/function.py - v1
Mandzhi Jul 12, 2025
724c952
Add docstrings for _models/completions/tune_params.py - v1
Mandzhi Jul 13, 2025
4c85fc2
Add docstrings for _models/completions/token.py - v1
Mandzhi Jul 13, 2025
26d83aa
Add docstrings for _models/completions/model.py - v1
Mandzhi Jul 14, 2025
34cc92b
Add docstrings for _models/completions/model.py - v2
Mandzhi Jul 14, 2025
55b4edc
Add docstrings for _models/completions/message.py - v1
Mandzhi Jul 14, 2025
283abee
Add docstrings for _models/completions/result.py - v1
Mandzhi Jul 14, 2025
4a5d4fa
Add docstrings for _models/completions/langchain.py - v1
Mandzhi Jul 14, 2025
1316a8d
Add docstrings for _models/completions/function.py - v2
Mandzhi Jul 15, 2025
1019f27
[pre-commit.ci lite] apply automatic fixes
pre-commit-ci-lite[bot] Jul 15, 2025
26ae98c
Add docstrings for _models/completions/function.py - v3
Mandzhi Jul 15, 2025
fd60330
Add docstrings for _models/completions/function.py - v4
Mandzhi Jul 15, 2025
37a537f
Add docstrings for _models/completions/function.py - v5
Mandzhi Jul 15, 2025
b8d2196
Add docstrings for _models/completions/function.py - v6
Mandzhi Jul 15, 2025
2f7de07
Merge branch 'master' into new-branch-7
vhaldemar Jul 15, 2025
f56c6c8
Add docstrings for _models/completions/function.py - v7
Mandzhi Jul 15, 2025
0b49743
[pre-commit.ci lite] apply automatic fixes
pre-commit-ci-lite[bot] Jul 15, 2025
b83ac65
Add docstrings for _models/completions/function.py - v8
Mandzhi Jul 15, 2025
7d2e22c
Add docstrings for _models/completions/langchain.py - v2
Mandzhi Jul 15, 2025
4950c32
Add docstrings for _models/completions/model.py - v3
Mandzhi Jul 15, 2025
cf7ae28
Add docstrings for _models/completions/result.py - v2
Mandzhi Jul 15, 2025
082e74b
Merge branch 'yandex-cloud:master' into new-branch-7
Mandzhi Jul 15, 2025
7cfe2f5
Add docstrings for _models/completions/model.py - v4
Mandzhi Jul 15, 2025
37f4c43
Add docstrings for _models/completions/config.py - v1
Mandzhi Jul 15, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 22 additions & 1 deletion src/yandex_cloud_ml_sdk/_models/completions/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,21 +19,42 @@


class ReasoningMode(ProtoEnumBase, Enum):
    """Enumeration of reasoning modes.

    Defines the reasoning modes that can be set in the GPT model
    configuration to control whether the model performs internal
    reasoning before producing a response.
    """
    #: the reasoning mode is not set explicitly
    REASONING_MODE_UNSPECIFIED = _m.REASONING_MODE_UNSPECIFIED
    #: reasoning is disabled
    DISABLED = _m.DISABLED
    #: reasoning is enabled, but the reasoning output stays hidden from the response
    ENABLED_HIDDEN = _m.ENABLED_HIDDEN


#: type alias for a reasoning mode value: a raw int, a mode name string, or a ReasoningMode member
ReasoningModeType: TypeAlias = Union[int, str, ReasoningMode]
#: type alias for tools usable in completions (currently only function tools)
CompletionTool: TypeAlias = FunctionTool


@dataclass(frozen=True)
class GPTModelConfig(BaseModelConfig):
    """Configuration for the GPT model.

    Holds the generation settings (sampling temperature, token limit,
    reasoning mode, response format) and the tool-usage settings
    applied to completion requests.
    """
    #: sampling temperature; higher values mean more random results.
    #: Should be a double between 0 (inclusive) and 1 (inclusive)
    temperature: float | None = None
    #: maximum number of tokens to generate in the response
    max_tokens: int | None = None
    #: reasoning mode applied during generation, allowing the model
    #: to perform internal reasoning before responding
    reasoning_mode: ReasoningModeType | None = None
    #: format of the response returned by the model; could be a JsonSchema,
    #: a JSON string, or a pydantic model
    response_format: ResponseType | None = None
    #: tools available for completion; a single tool or a sequence of tools
    tools: Sequence[CompletionTool] | CompletionTool | None = None
    #: whether to allow parallel tool calls during completion; defaults to 'true'
    parallel_tool_calls: bool | None = None
    #: strategy for choosing tools: depending on this parameter, the model can
    #: always call some tool, call a specific tool, or call no tool at all
    tool_choice: ToolChoiceType | None = None
24 changes: 22 additions & 2 deletions src/yandex_cloud_ml_sdk/_models/completions/function.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,18 +3,38 @@
from typing_extensions import override

from yandex_cloud_ml_sdk._types.function import BaseModelFunction, ModelTypeT
from yandex_cloud_ml_sdk._utils.doc import doc_from

from .model import AsyncGPTModel, GPTModel


class BaseCompletions(BaseModelFunction[ModelTypeT]):
"""
A class for handling completions models.

It defines the core functionality for calling a model
to generate completions based on the provided model name and version.
"""
@override
def __call__(
self,
model_name: str,
*,
model_version: str = 'latest',
) -> ModelTypeT:
"""
Create a model object to call for generating completions.

This method constructs the URI for the model based on the provided
name and version. If the name contains ``://``, it is
treated as a full URI. Otherwise, it looks up the model name in
the well-known names dictionary. But after this, in any case,
we construct a URI in the form ``gpt://<folder_id>/<model>/<version>``.

:param model_name: the name or URI of the model to call.
:param model_version: the version of the model to use.
Defaults to 'latest'.
"""
if '://' in model_name:
uri = model_name
else:
Expand All @@ -26,10 +46,10 @@ def __call__(
uri=uri,
)


# Synchronous completions function; docstring is inherited from BaseCompletions.
@doc_from(BaseCompletions)
class Completions(BaseCompletions[GPTModel]):
    # model class instantiated by BaseCompletions.__call__
    _model_type = GPTModel


# Asynchronous completions function; docstring is inherited from BaseCompletions.
@doc_from(BaseCompletions)
class AsyncCompletions(BaseCompletions[AsyncGPTModel]):
    # model class instantiated by BaseCompletions.__call__
    _model_type = AsyncGPTModel
2 changes: 2 additions & 0 deletions src/yandex_cloud_ml_sdk/_models/completions/langchain.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,8 @@ def _transform_messages(history: list[BaseMessage]) -> list[TextMessageDict]:


class ChatYandexGPT(BaseYandexLanguageModel[BaseGPTModel], BaseChatModel):
"""Chat model for Yandex GPT integration.
This class provides integration with the `LangChain <https://python.langchain.com/docs/introduction/>`_ library."""
class Config:
arbitrary_types_allowed = True

Expand Down
12 changes: 11 additions & 1 deletion src/yandex_cloud_ml_sdk/_models/completions/message.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,18 @@

@runtime_checkable
class TextMessageWithToolCallsProtocol(TextMessageProtocol, Protocol):
    """Protocol describing a text message that carries tool calls.

    Extends ``TextMessageProtocol`` with a ``tool_calls`` attribute.
    Runtime-checkable, so it supports ``isinstance`` checks.
    """
    #: the tool calls associated with the message
    tool_calls: ToolCallList


class FunctionResultMessageDict(TypedDict):
    """TypedDict describing the structure of a function result message.

    Carries an optional role of the message sender and the required
    results of tool calls.
    """
    #: role of the message sender (optional)
    role: NotRequired[str]
    #: results of the tool calls (required)
    tool_results: Required[Iterable[ToolResultDictType]]

Expand All @@ -31,12 +39,14 @@ class _ProtoMessageKwargs(TypedDict):
tool_result_list: NotRequired[ProtoCompletionsToolResultList]
tool_call_list: NotRequired[ProtoCompletionsToolCallList]


#: type alias for a message: either a standard message or a function result message
CompletionsMessageType = Union[MessageType, FunctionResultMessageDict]
#: type alias for input: a single completion message or an iterable of completion messages
MessageInputType = Union[CompletionsMessageType, Iterable[CompletionsMessageType]]


def messages_to_proto(messages: MessageInputType) -> list[ProtoMessage]:
""":meta private:"""
msgs: tuple[CompletionsMessageType, ...] = coerce_tuple(
messages,
(dict, str, TextMessageProtocol), # type: ignore[arg-type]
Expand Down
Loading