-
Notifications
You must be signed in to change notification settings - Fork 136
Expand file tree
/
Copy pathinterfaces.py
More file actions
194 lines (157 loc) · 5.71 KB
/
interfaces.py
File metadata and controls
194 lines (157 loc) · 5.71 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
# Copyright 2026 The ODML Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interfaces for LiteRT LM engines and conversations."""
from __future__ import annotations
import abc
import collections.abc
import dataclasses
import enum
import pathlib
from typing import Any
class Backend(enum.Enum):
  """Hardware backends for LiteRT-LM."""

  # Values skip 1, 2, and 5 — presumably reserved to stay in sync with an
  # external enum (proto/C++ runtime); TODO confirm before renumbering.
  UNSPECIFIED = 0
  CPU = 3
  GPU = 4
  NPU = 6
@dataclasses.dataclass(kw_only=True)
class AbstractEngine(abc.ABC):
"""Abstract base class for LiteRT-LM engines.
Attributes:
model_path: Path to the model file.
backend: The hardware backend used for inference.
max_num_tokens: Maximum number of tokens for the KV cache.
cache_dir: Directory for caching compiled model artifacts.
"""
model_path: str
backend: Backend
max_num_tokens: int = 4096
cache_dir: str = ""
def __enter__(self) -> AbstractEngine:
"""Initializes the engine resources."""
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
"""Releases the engine resources."""
del exc_type, exc_val, exc_tb
@abc.abstractmethod
def create_conversation(
self,
*,
messages: (
collections.abc.Sequence[collections.abc.Mapping[str, Any]] | None
) = None,
tools: (
collections.abc.Sequence[collections.abc.Callable[..., Any]] | None
) = None,
) -> AbstractConversation:
"""Creates a new conversation for this engine.
Args:
messages: A sequence of messages for the conversation preface. Each
message is a mapping that should contain 'role' and 'content' keys.
tools: A list of Python functions to be used as tools.
"""
class AbstractConversation(abc.ABC):
  """Base contract for LiteRT-LM conversation sessions.

  Conversations are context managers: entering acquires the session and
  exiting releases it.

  Attributes:
    messages: Conversation preface messages.
    tools: Python callables exposed to the model as tools.
  """

  def __init__(
      self,
      *,
      messages: (
          collections.abc.Sequence[collections.abc.Mapping[str, Any]] | None
      ) = None,
      tools: (
          collections.abc.Sequence[collections.abc.Callable[..., Any]] | None
      ) = None,
  ):
    """Initializes the conversation state.

    Args:
      messages: Optional conversation preface. Each entry is a mapping
        expected to carry 'role' and 'content' keys.
      tools: Optional Python callables exposed to the model as tools.
    """
    # A falsy argument (None or an empty sequence) is normalized to a fresh
    # empty list; a truthy sequence is stored as-is.
    self.messages = [] if not messages else messages
    self.tools = [] if not tools else tools

  def __enter__(self) -> AbstractConversation:
    """Acquires the conversation and returns it."""
    return self

  def __exit__(self, exc_type, exc_val, exc_tb) -> None:
    """Releases the conversation; any in-flight exception info is ignored."""
    del exc_type, exc_val, exc_tb

  @abc.abstractmethod
  def send_message(
      self, message: str | collections.abc.Mapping[str, Any]
  ) -> collections.abc.Mapping[str, Any]:
    """Sends a message and returns the complete response.

    Args:
      message: The input message, e.g. "Hello" or
        {"role": "user", "content": "Hello"}.

    Returns:
      A mapping with the model's response, shaped like:
      {"role": "assistant", "content": [{"type": "text", "text": "..."}]}
    """

  @abc.abstractmethod
  def send_message_async(
      self, message: str | collections.abc.Mapping[str, Any]
  ) -> collections.abc.Iterator[collections.abc.Mapping[str, Any]]:
    """Sends a message and streams the response.

    Despite the name, the signature declares a plain iterator (not an
    awaitable): callers consume response chunks by iterating.

    Args:
      message: The input message, e.g. "Hello" or
        {"role": "user", "content": "Hello"}.

    Returns:
      An iterator of mappings, each holding a chunk of the response.
    """

  def cancel_process(self) -> None:
    """Cancels the current inference process."""
@dataclasses.dataclass
class BenchmarkInfo(abc.ABC):
  """Results from a benchmark run.

  NOTE(review): this inherits abc.ABC but declares no abstract methods, so
  it remains directly instantiable; presumably backends subclass it to
  attach construction logic — confirm intent.

  Attributes:
    init_time_in_second: The time in seconds to initialize the engine and the
      conversation.
    time_to_first_token_in_second: The time in seconds to the first token.
    last_prefill_token_count: The number of tokens in the last prefill.
    last_prefill_tokens_per_second: The number of tokens processed per second
      in the last prefill.
    last_decode_token_count: The number of tokens in the last decode.
    last_decode_tokens_per_second: The number of tokens processed per second
      in the last decode.
  """

  # Field order is part of the positional __init__ signature; do not reorder.
  init_time_in_second: float
  time_to_first_token_in_second: float
  last_prefill_token_count: int
  last_prefill_tokens_per_second: float
  last_decode_token_count: int
  last_decode_tokens_per_second: float
@dataclasses.dataclass
class AbstractBenchmark(abc.ABC):
  """Abstract base class for LiteRT-LM benchmarks.

  NOTE(review): unlike AbstractEngine, this dataclass is not kw_only, so
  fields may be passed positionally — confirm whether the asymmetry is
  intentional before changing either.

  Attributes:
    model_path: Path to the model file.
    backend: The hardware backend used for inference.
    prefill_tokens: Number of tokens for the prefill phase.
    decode_tokens: Number of tokens for the decode phase.
    cache_dir: Directory for caching compiled model artifacts.
  """

  model_path: str
  backend: Backend
  prefill_tokens: int = 256
  decode_tokens: int = 256
  cache_dir: str = ""

  @abc.abstractmethod
  def run(self) -> BenchmarkInfo:
    """Runs the benchmark and returns the result."""