-
Notifications
You must be signed in to change notification settings - Fork 3.1k
Expand file tree
/
Copy pathbuilder.py
More file actions
212 lines (187 loc) · 6.4 KB
/
builder.py
File metadata and controls
212 lines (187 loc) · 6.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
import contextlib
from collections import deque
from collections.abc import Generator, Iterable
from dataclasses import dataclass, field
import mlx.core as mx
from loguru import logger
from exo.api.types import ImageEditsTaskParams, ImageGenerationTaskParams
from exo.shared.constants import EXO_TRACING_ENABLED
from exo.shared.tracing import clear_trace_buffer, get_trace_buffer
from exo.shared.types.chunks import Chunk, ErrorChunk
from exo.shared.types.events import (
Event,
TraceEventData,
TracesCollected,
)
from exo.shared.types.tasks import (
GenerationTask,
ImageEdits,
ImageGeneration,
ImageTask,
TaskId,
)
from exo.shared.types.worker.instances import BoundInstance
from exo.shared.types.worker.runner_response import (
CancelledResponse,
FinishedResponse,
ModelLoadingResponse,
)
from exo.shared.types.worker.shards import (
CfgShardMetadata,
PipelineShardMetadata,
ShardMetadata,
)
from exo.utils.channels import MpReceiver, MpSender
from exo.worker.engines.base import Builder, Engine
from exo.worker.engines.image.distributed_model import (
DistributedImageModel,
)
from exo.worker.engines.image.generate import (
generate_image,
warmup_image_generator,
)
from exo.worker.engines.mlx.utils_mlx import (
initialize_mlx,
)
def _is_primary_output_node(shard_metadata: ShardMetadata) -> bool:
    """Decide whether this shard is the one that emits generated images.

    Exactly one node per generation produces output. For CFG-sharded models
    that is the final pipeline stage of CFG group 0 (the positive-prompt
    branch); for plain pipeline shards it is the final pipeline stage. Any
    other shard layout is never the output node.
    """
    if isinstance(shard_metadata, CfgShardMetadata):
        md = shard_metadata
        on_last_stage = md.pipeline_rank + 1 == md.pipeline_world_size
        return on_last_stage and md.cfg_rank == 0
    if isinstance(shard_metadata, PipelineShardMetadata):
        md = shard_metadata
        return md.device_rank + 1 == md.world_size
    return False
def _send_traces_if_enabled(
    event_sender: MpSender[Event],
    task_id: TaskId,
    rank: int,
) -> None:
    """Flush buffered trace events for *task_id* to the event channel.

    No-op unless EXO_TRACING_ENABLED is set. When tracing is on, any
    buffered trace entries are converted to TraceEventData, sent as a
    single TracesCollected event tagged with this rank, and the trace
    buffer is cleared so the next task starts fresh.
    """
    if not EXO_TRACING_ENABLED:
        return
    buffered = get_trace_buffer()
    if buffered:
        payload = [
            TraceEventData(
                name=entry.name,
                start_us=entry.start_us,
                duration_us=entry.duration_us,
                rank=entry.rank,
                category=entry.category,
            )
            for entry in buffered
        ]
        event_sender.send(
            TracesCollected(task_id=task_id, rank=rank, traces=payload)
        )
    # Clearing an empty buffer is harmless; always reset after a flush pass.
    clear_trace_buffer()
@dataclass
class MfluxBuilder(Builder):
    """Builder that connects, loads, and assembles a distributed image engine.

    Lifecycle: `connect` joins the MLX distributed group, `load` materializes
    the sharded image model, and `build` wraps both into an ImageEngine.
    """

    event_sender: MpSender[Event]
    cancel_receiver: MpReceiver[TaskId]
    # Populated by load(); None until then.
    shard_metadata: ShardMetadata | None = None
    image_model: DistributedImageModel | None = None
    # Populated by connect(); None until then.
    group: mx.distributed.Group | None = None

    def connect(self, bound_instance: BoundInstance) -> None:
        """Join the MLX distributed group for this bound instance."""
        self.group = initialize_mlx(bound_instance)

    def load(self, bound_instance: BoundInstance) -> Generator[ModelLoadingResponse]:
        """Load the sharded model; yields no loading-progress responses."""
        shard = bound_instance.bound_shard
        self.shard_metadata = shard
        self.image_model = DistributedImageModel.from_shard_metadata(
            shard, self.group
        )
        return
        # very important!
        # The unreachable yield keeps this function a generator, so callers
        # that iterate it get an (empty) generator rather than None.
        yield

    def close(self) -> None:
        """Drop references to the model and distributed group, if present."""
        try:
            del self.image_model, self.group
        except (NameError, AttributeError):
            pass

    def build(
        self,
    ) -> Engine:
        """Assemble the ImageEngine; requires connect() and load() first."""
        assert self.image_model
        assert self.shard_metadata
        return ImageEngine(
            self.image_model,
            self.shard_metadata,
            self.event_sender,
            self.cancel_receiver,
        )
@dataclass
class ImageEngine(Engine):
    """Engine that runs queued image generation/edit tasks one at a time.

    Tasks arrive via submit(); step() drives the single in-flight task
    generator and pulls at most one response per call. Only the primary
    output node (see _is_primary_output_node) actually yields chunks.
    """

    image_model: DistributedImageModel
    shard_metadata: ShardMetadata
    event_sender: MpSender[Event]
    cancel_receiver: MpReceiver[TaskId]
    # Generator for the task currently being executed; None when idle.
    current_gen: Generator[tuple[TaskId, Chunk]] | None = field(
        init=False, default=None
    )
    # FIFO of submitted tasks awaiting execution.
    queue: deque[ImageTask] = field(init=False, default_factory=deque)

    def warmup(self) -> None:
        """Run a throwaway generation so later tasks start fast.

        Non-primary nodes participate in the warmup but get no image back.
        """
        image = warmup_image_generator(model=self.image_model)
        if image is not None:
            logger.info(f"warmed up by generating {image.size} image")
        else:
            logger.info("warmup completed (non-primary node)")

    def submit(
        self,
        task: GenerationTask,
    ) -> None:
        """Enqueue an image task; only ImageGeneration/ImageEdits accepted."""
        assert isinstance(task, (ImageGeneration, ImageEdits))
        self.queue.append(task)

    def step(
        self,
    ) -> Iterable[tuple[TaskId, Chunk | CancelledResponse | FinishedResponse]]:
        """Advance the in-flight task by one response.

        If the current generator is exhausted (or none is running) and the
        queue is non-empty, start the next task and pull its first response.
        Returns a 0- or 1-element tuple of (task_id, response).
        """
        resp = None
        if self.current_gen is not None:
            # next(..., None) on an already-exhausted generator is a no-op.
            resp = next(self.current_gen, None)
        if resp is None and len(self.queue) > 0:
            task = self.queue.popleft()
            self.current_gen = self._run_image_task(task.task_id, task.task_params)
            resp = next(self.current_gen, None)
        return (resp,) if resp is not None else ()

    def close(self) -> None:
        """Release the model reference (safe if already released)."""
        with contextlib.suppress(NameError, AttributeError):
            del self.image_model

    def _run_image_task(
        self,
        task_id: TaskId,
        task_params: ImageGenerationTaskParams | ImageEditsTaskParams,
    ) -> Generator[tuple[TaskId, Chunk]]:
        """Run one image task, yielding (task_id, chunk) on the primary node.

        All ranks execute generate_image (presumably a collective — non-primary
        ranks just never yield). On failure the primary node yields an
        ErrorChunk before re-raising; traces are flushed in all cases.
        """
        assert self.image_model
        logger.info(f"received image task: {str(task_params)[:500]}")

        def cancel_checker() -> bool:
            # Drain any newly arrived cancel ids into the cancelled set, then
            # ask whether this task is among them. _cancelled_tasks and
            # should_cancel come from the Engine base class — not visible here.
            for cancel_id in self.cancel_receiver.collect():
                self._cancelled_tasks.add(cancel_id)
            return self.should_cancel(task_id)

        try:
            for response in generate_image(
                model=self.image_model,
                task=task_params,
                cancel_checker=cancel_checker,
            ):
                # Only the primary output node forwards chunks downstream.
                if _is_primary_output_node(self.shard_metadata):
                    yield (task_id, response)
        except Exception as e:
            if _is_primary_output_node(self.shard_metadata):
                yield (
                    task_id,
                    ErrorChunk(
                        model=self.shard_metadata.model_card.model_id,
                        finish_reason="error",
                        error_message=str(e),
                    ),
                )
            # Re-raise so the caller still observes the failure.
            raise
        finally:
            # Flush per-task traces regardless of success, failure, or cancel.
            _send_traces_if_enabled(
                self.event_sender, task_id, self.shard_metadata.device_rank
            )
        return