
Commit c441265

Apply suggestions from code review
1 parent eb9ea32 commit c441265

2 files changed: +5 -2 lines changed


src/plugins/intel_cpu/src/nodes/scaled_attn.cpp

Lines changed: 1 addition & 2 deletions
@@ -414,10 +414,9 @@ struct MHAKernel<ScaledDotProductAttention::KT_ONEDNN, T> {
     const size_t m_block_size = qk_gemm_ptr->get_mblk_size();
     auto m_blocks = (q_len + m_block_size - 1) / m_block_size;
     bool is_xf16 = any_of(precision_of<T>::value, ov::element::bf16, ov::element::f16);
-    // packed k, v
     auto attn_mask_precision =
         attention_mask ? attention_mask.get_precision() : ov::element::Type(precision_of<T>::value);
-
+    // packed k, v
     parallel_for2d(B, Hk, [&](size_t b, size_t h) {
         T* k_ptr = &present_key.at<T>({b, h, 0, 0});
         T* v_ptr = &present_value.at<T>({b, h, 0, 0});

src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/stateful_sdpa_bool_mask.cpp

Lines changed: 4 additions & 0 deletions
@@ -1,3 +1,7 @@
+// Copyright (C) 2025 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #include <gtest/gtest.h>

 #include <cstddef>
