Skip to content

Commit c02eaae

Browse files
committed
fix for existing test
Signed-off-by: Aurelien Chartier <2567591+achartier@users.noreply.github.com>
1 parent 84f3e23 commit c02eaae

File tree

1 file changed

+5
-4
lines changed

1 file changed

+5
-4
lines changed

tensorrt_llm/_torch/pyexecutor/_util.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1342,8 +1342,10 @@ def _infer_shared_expert_size_from_adapter(adapter_dir: str) -> int:
13421342

13431343
try:
13441344
from tensorrt_llm.lora_manager import get_model_path, load_state_dict
1345-
adapter_weights = load_state_dict(
1346-
get_model_path(adapter_dir, "adapter_model"))
1345+
model_path = get_model_path(adapter_dir, "adapter_model")
1346+
if model_path is None:
1347+
return 0
1348+
adapter_weights = load_state_dict(model_path)
13471349
if adapter_weights is None:
13481350
return 0
13491351
for key, tensor in adapter_weights.items():
@@ -1356,8 +1358,7 @@ def _infer_shared_expert_size_from_adapter(adapter_dir: str) -> int:
13561358
return tensor.shape[1] if tensor.shape[
13571359
0] == rank else tensor.shape[0]
13581360
except Exception as e:
1359-
logger.debug(f"Failed to infer shared expert size from adapter: {e}",
1360-
exc_info=True)
1361+
logger.debug(f"Failed to infer shared expert size from adapter: {e}")
13611362
return 0
13621363

13631364

0 commit comments

Comments (0)