Skip to content

Commit 75decca

Browse files
committed
fix(diffusers): LoRA adapters, handle several base models
Signed-off-by: Raphael Glon <[email protected]>
1 parent dd29413 commit 75decca

File tree

1 file changed: +4 −2 lines changed

docker_images/diffusers/app/lora.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -141,7 +141,8 @@ def _load_textual_embeddings(self, adapter, model_data):
141141
logger.info("Text embeddings loaded for adapter %s", adapter)
142142
else:
143143
logger.info(
144-
"No text embeddings were loaded due to invalid embeddings or a mismatch of token sizes for adapter %s",
144+
"No text embeddings were loaded due to invalid embeddings or a mismatch of token sizes "
145+
"for adapter %s",
145146
adapter,
146147
)
147148
self.current_tokens_loaded = tokens_to_add
@@ -157,7 +158,8 @@ def _load_lora_adapter(self, kwargs):
157158
logger.error(msg)
158159
raise ValueError(msg)
159160
base_model = model_data.cardData["base_model"]
160-
if self.model_id != base_model:
161+
if (isinstance(base_model, list) and (self.model_id not in base_model)) or \
162+
(self.model_id != base_model):
161163
msg = f"Requested adapter {adapter:s} is not a LoRA adapter for base model {self.model_id:s}"
162164
logger.error(msg)
163165
raise ValueError(msg)

0 commit comments

Comments (0)