Skip to content

Commit ae8f74b

Browse files
authored
[None][chore] Reduce tedious logs (NVIDIA#10847)
Signed-off-by: Yanchao Lu <yanchaol@nvidia.com>
1 parent 3a89495 commit ae8f74b

1 file changed

Lines changed: 2 additions & 2 deletions

File tree

tensorrt_llm/_torch/modules/attention.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -334,11 +334,11 @@ def __init__(
334334
key="sparse_attention_config")
335335

336336
if config.sparse_attention_config.algorithm == "rocket":
337-
logger.warning("disable rope_fusion for RocketKV.")
337+
logger.info_once("disable rope_fusion for RocketKV.")
338338
self.rope_fusion = False
339339

340340
if self.rope_fusion and not attn_cls.support_fused_rope():
341-
logger.warning(
341+
logger.info_once(
342342
"rope_fusion is true but the attention backend does not support it. Will disable rope_fusion."
343343
)
344344
self.rope_fusion = False

0 commit comments

Comments (0)