tests/v1/kv_offload/test_cpu_offloading.py (1 addition, 3 deletions)
@@ -16,12 +16,10 @@
 from vllm.utils.system_utils import set_env_var
 
 CPU_BLOCK_SIZES = [48]
-ATTN_BACKENDS = ["FLASH_ATTN"]
+ATTN_BACKENDS = ["FLASH_ATTN", "TRITON_ATTN"]
 
 if current_platform.is_cuda():
     ATTN_BACKENDS.append("FLASHINFER")
-elif current_platform.is_rocm():
-    ATTN_BACKENDS = ["TRITON_ATTN"]
 
 
 class MockSubscriber:
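
This change makes TRITON_ATTN part of the default backend matrix on every platform instead of a ROCm-only override; FLASHINFER is still appended on CUDA. For illustration only, here is a minimal sketch of how a backend list like this is commonly consumed in a parametrized pytest suite. The test name and the monkeypatch-based selection via VLLM_ATTENTION_BACKEND are assumptions for this sketch, not this file's actual code:

import os

import pytest

ATTN_BACKENDS = ["FLASH_ATTN", "TRITON_ATTN"]

# Hypothetical example: each backend in the list becomes one test case,
# selected through the VLLM_ATTENTION_BACKEND environment variable.
@pytest.mark.parametrize("backend", ATTN_BACKENDS)
def test_backend_selection_sketch(backend, monkeypatch):
    monkeypatch.setenv("VLLM_ATTENTION_BACKEND", backend)
    assert os.environ["VLLM_ATTENTION_BACKEND"] == backend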
vllm/v1/attention/backends/triton_attn.py (13 additions, 0 deletions)
@@ -256,6 +256,19 @@ def get_kv_cache_shape(
             raise ValueError("Block size must be a multiple of 16.")
         return (num_blocks, 2, block_size, num_kv_heads, head_size)
 
+    @staticmethod
+    def get_kv_cache_stride_order(
+        include_num_layers_dimension: bool = False,
+    ) -> tuple[int, ...]:
+        # `stride_order` indicates the permutation that gets
+        # us from `get_kv_cache_shape` to the actual memory layout we want.
+        if include_num_layers_dimension:
+            # (num_blocks, num_layers, 2, block_size, num_kv_heads, head_size)
+            return (1, 0, 2, 3, 4, 5)
+
+        # (num_blocks, 2, block_size, num_kv_heads, head_size)
+        return (0, 1, 2, 3, 4)
+
     @staticmethod
     def use_cascade_attention(*args, **kwargs) -> bool:
         return False
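
The returned tuple is the permutation taking the logical `get_kv_cache_shape` order to the physical allocation order; per the in-code comment, applying (1, 0, 2, 3, 4, 5) to an assumed logical shape of (num_layers, num_blocks, 2, block_size, num_kv_heads, head_size) yields a blocks-outermost layout. A minimal sketch of those assumed semantics follows; the helper name and the toy shape are illustrative, not vLLM's actual allocator:

import torch

def allocate_with_stride_order(logical_shape, stride_order):
    # Allocate in the permuted (physical) order, then permute back so
    # callers index with the logical shape while the underlying strides
    # follow the physical layout.
    physical_shape = tuple(logical_shape[i] for i in stride_order)
    buf = torch.empty(physical_shape)
    inverse = [stride_order.index(i) for i in range(len(stride_order))]
    return buf.permute(*inverse)

# Assumed logical shape with the layers dimension:
# (num_layers=4, num_blocks=8, 2, block_size=16, num_kv_heads=2, head_size=64)
cache = allocate_with_stride_order((4, 8, 2, 16, 2, 64), (1, 0, 2, 3, 4, 5))
print(cache.shape)    # torch.Size([4, 8, 2, 16, 2, 64]) -- logical view
print(cache.stride()) # (4096, 16384, 2048, 128, 64, 1) -- blocks outermost

With blocks outermost in memory, one block's KV data for all layers is contiguous, which is convenient when whole blocks are transferred at once, as in the CPU offloading path the test change above exercises.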