Commit 86534bf

Linting

1 parent b6a88bd

File tree: 1 file changed (+4 / -1 lines)

megatron/core/optimizer/qk_clip.py

Lines changed: 4 additions & 1 deletion
@@ -22,7 +22,10 @@ def clip_qk(model, log_max_only=False) -> float:
     for model_chunk in model:
         for transformer_layer in model_chunk.module.module.decoder.layers:
             if hasattr(transformer_layer.self_attention, 'clip_qk'):
-                if transformer_layer.self_attention.core_attention.current_max_attn_logits is None:
+                if (
+                    transformer_layer.self_attention.core_attention.current_max_attn_logits
+                    is None
+                ):
                     continue
                 torch.distributed.all_reduce(
                     transformer_layer.self_attention.core_attention.current_max_attn_logits,
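
For context, the change is purely cosmetic: a long attribute chain in an `if` condition is wrapped in parentheses so the line fits the linter's length limit, with no change in behaviour. Below is a minimal, self-contained sketch of the same wrapping pattern; the stand-in classes and the `should_skip` helper are hypothetical, and only the attribute names are taken from the diff.

# Sketch only: hypothetical stand-ins for the real attention modules.
class _CoreAttention:
    def __init__(self):
        # Populated during the forward pass in the real code; None until then.
        self.current_max_attn_logits = None


class _SelfAttention:
    def __init__(self):
        self.core_attention = _CoreAttention()


def should_skip(self_attention: _SelfAttention) -> bool:
    # Linted form: the long attribute chain and the `is None` test are wrapped
    # in parentheses instead of one over-long line or a backslash continuation.
    if (
        self_attention.core_attention.current_max_attn_logits
        is None
    ):
        return True
    return False


print(should_skip(_SelfAttention()))  # -> True

Parenthesised wrapping is the continuation style PEP 8 recommends and the one formatters such as black produce, since it avoids backslash line continuations.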
