import logging

import torch

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)


def split_einsum(q, k, v, mask, heads, dim_head):
    """ Attention Implementation backing AttentionImplementations.SPLIT_EINSUM

    - Implements https://machinelearning.apple.com/research/neural-engine-transformers
    - Recommended for ANE
    - Marginally slower on GPU
    """
    mh_q = [
        q[:, head_idx * dim_head:(head_idx + 1) * dim_head, :, :]
        for head_idx in range(heads)
    ]  # (bs, dim_head, 1, max_seq_length) * heads

    k = k.transpose(1, 3)
    mh_k = [
        k[:, :, :, head_idx * dim_head:(head_idx + 1) * dim_head]
        for head_idx in range(heads)
    ]  # (bs, max_seq_length, 1, dim_head) * heads

    mh_v = [
        v[:, head_idx * dim_head:(head_idx + 1) * dim_head, :, :]
        for head_idx in range(heads)
    ]  # (bs, dim_head, 1, max_seq_length) * heads

    attn_weights = [
        torch.einsum("bchq,bkhc->bkhq", [qi, ki]) * (dim_head**-0.5)
        for qi, ki in zip(mh_q, mh_k)
    ]  # (bs, max_seq_length, 1, max_seq_length) * heads

    if mask is not None:
        for head_idx in range(heads):
            attn_weights[head_idx] = attn_weights[head_idx] + mask

    attn_weights = [
        aw.softmax(dim=1) for aw in attn_weights
    ]  # (bs, max_seq_length, 1, max_seq_length) * heads
    attn = [
        torch.einsum("bkhq,bchk->bchq", wi, vi)
        for wi, vi in zip(attn_weights, mh_v)
    ]  # (bs, dim_head, 1, max_seq_length) * heads

    attn = torch.cat(attn, dim=1)  # (bs, dim, 1, max_seq_length)
    return attn
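
# Shape sketch (illustrative, not from the original source): all three
# implementations in this file take q, k, v packed as
# (bs, heads * dim_head, 1, seq_length) and return the same layout, e.g.
#
#     q = k = v = torch.randn(1, 8 * 64, 1, 77)
#     out = split_einsum(q, k, v, None, heads=8, dim_head=64)
#     out.shape  # torch.Size([1, 512, 1, 77])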


CHUNK_SIZE = 512


def split_einsum_v2(q, k, v, mask, heads, dim_head):
    """ Attention Implementation backing AttentionImplementations.SPLIT_EINSUM_V2

    - Implements https://machinelearning.apple.com/research/neural-engine-transformers
    - Recommended for ANE
    - Marginally slower on GPU
    - Chunks the query sequence to avoid large intermediate tensors and improve ANE performance
    """
    query_seq_length = q.size(3)
    # Any remainder beyond num_chunks * CHUNK_SIZE would be dropped by the
    # slicing below, so query_seq_length is assumed to be a multiple of CHUNK_SIZE
    num_chunks = query_seq_length // CHUNK_SIZE

    if num_chunks == 0:
        logger.info(
            "AttentionImplementations.SPLIT_EINSUM_V2: query sequence too short to chunk "
            f"({query_seq_length}<{CHUNK_SIZE}), falling back to AttentionImplementations.SPLIT_EINSUM (safe to ignore)")
        return split_einsum(q, k, v, mask, heads, dim_head)

    if mask is not None:
        # The chunked path below never applies the attention mask; fall back to
        # the unchunked implementation rather than silently ignoring it
        logger.info(
            "AttentionImplementations.SPLIT_EINSUM_V2: mask is not supported by the chunked path, "
            "falling back to AttentionImplementations.SPLIT_EINSUM (safe to ignore)")
        return split_einsum(q, k, v, mask, heads, dim_head)

    logger.info(
        "AttentionImplementations.SPLIT_EINSUM_V2: Splitting query sequence length of "
        f"{query_seq_length} into {num_chunks} chunks")

    mh_q = [
        q[:, head_idx * dim_head:(head_idx + 1) * dim_head, :, :]
        for head_idx in range(heads)
    ]  # (bs, dim_head, 1, max_seq_length) * heads

    # Chunk the query sequence for each head
    mh_q_chunked = [
        [h_q[..., chunk_idx * CHUNK_SIZE:(chunk_idx + 1) * CHUNK_SIZE]
         for chunk_idx in range(num_chunks)]
        for h_q in mh_q
    ]  # ((bs, dim_head, 1, CHUNK_SIZE) * num_chunks) * heads

    k = k.transpose(1, 3)
    mh_k = [
        k[:, :, :, head_idx * dim_head:(head_idx + 1) * dim_head]
        for head_idx in range(heads)
    ]  # (bs, max_seq_length, 1, dim_head) * heads

    mh_v = [
        v[:, head_idx * dim_head:(head_idx + 1) * dim_head, :, :]
        for head_idx in range(heads)
    ]  # (bs, dim_head, 1, max_seq_length) * heads

    attn_weights = [
        [
            torch.einsum("bchq,bkhc->bkhq", [qi_chunk, ki]) * (dim_head**-0.5)
            for qi_chunk in h_q_chunked
        ] for h_q_chunked, ki in zip(mh_q_chunked, mh_k)
    ]  # ((bs, max_seq_length, 1, CHUNK_SIZE) * num_chunks) * heads

    attn_weights = [
        [aw_chunk.softmax(dim=1) for aw_chunk in aw_chunked]
        for aw_chunked in attn_weights
    ]  # ((bs, max_seq_length, 1, CHUNK_SIZE) * num_chunks) * heads

    attn = [
        [
            torch.einsum("bkhq,bchk->bchq", wi_chunk, vi)
            for wi_chunk in wi_chunked
        ] for wi_chunked, vi in zip(attn_weights, mh_v)
    ]  # ((bs, dim_head, 1, CHUNK_SIZE) * num_chunks) * heads

    # Stitch the query chunks back together, then concatenate heads along channels
    attn = torch.cat([
        torch.cat(attn_chunked, dim=3) for attn_chunked in attn
    ], dim=1)  # (bs, dim, 1, max_seq_length)

    return attn
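
# Chunking sketch (illustrative): with CHUNK_SIZE = 512, a query sequence of
# 4096 tokens (e.g. a 64x64 feature map) is split into 8 chunks, so each
# per-head attention-weight tensor is (bs, max_seq_length, 1, 512) rather
# than a single (bs, max_seq_length, 1, 4096) tensor.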


def original(q, k, v, mask, heads, dim_head):
    """ Attention Implementation backing AttentionImplementations.ORIGINAL

    - Not recommended for ANE
    - Recommended for GPU
    """
    bs = q.size(0)
    # Unpack (bs, heads * dim_head, 1, seq) into (bs, heads, dim_head, seq)
    mh_q = q.view(bs, heads, dim_head, -1)
    mh_k = k.view(bs, heads, dim_head, -1)
    mh_v = v.view(bs, heads, dim_head, -1)

    attn_weights = torch.einsum("bhcq,bhck->bhqk", [mh_q, mh_k])
    attn_weights.mul_(dim_head**-0.5)  # scale by 1/sqrt(dim_head) in place

    if mask is not None:
        attn_weights = attn_weights + mask

    attn_weights = attn_weights.softmax(dim=3)

    attn = torch.einsum("bhqk,bhck->bhcq", [attn_weights, mh_v])
    attn = attn.contiguous().view(bs, heads * dim_head, 1, -1)
    return attn
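

if __name__ == "__main__":
    # Illustrative smoke test (an editorial sketch, not part of the original
    # module): on unmasked inputs the three implementations should agree.
    # Sizes are arbitrary; seq_len is a multiple of CHUNK_SIZE so that
    # split_einsum_v2 exercises its chunked path.
    bs, heads, dim_head, seq_len = 2, 8, 64, 1024
    q = torch.randn(bs, heads * dim_head, 1, seq_len)
    k = torch.randn(bs, heads * dim_head, 1, seq_len)
    v = torch.randn(bs, heads * dim_head, 1, seq_len)

    ref = original(q, k, v, None, heads, dim_head)
    assert torch.allclose(split_einsum(q, k, v, None, heads, dim_head), ref, atol=1e-4)
    assert torch.allclose(split_einsum_v2(q, k, v, None, heads, dim_head), ref, atol=1e-4)
    print("all attention implementations agree:", tuple(ref.shape))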