segformer_multihead_attention.py
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math

from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops


@keras_cv_export("keras_cv.layers.SegFormerMultiheadAttention")
class SegFormerMultiheadAttention(keras.layers.Layer):
    def __init__(self, project_dim, num_heads, sr_ratio):
        """Efficient MultiHeadAttention implementation as a Keras layer.

        A major bottleneck in scaling transformers is the self-attention
        layer, whose complexity is O(n^2) in the sequence length n.
        `SegFormerMultiheadAttention` performs a sequence reduction (SR)
        operation with a given ratio to shorten the sequence before the key
        and value projections, reducing the O(n^2) complexity to O(n^2/R),
        where R is the sequence reduction ratio.

        References:
            - [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) (CVPR 2021)  # noqa: E501
            - [NVlabs' official implementation](https://github.com/NVlabs/SegFormer/blob/master/mmseg/models/backbones/mix_transformer.py)  # noqa: E501
            - [@sithu31296's reimplementation](https://github.com/sithu31296/semantic-segmentation/blob/main/semseg/models/backbones/mit.py)  # noqa: E501
            - [Ported from the TensorFlow implementation from DeepVision](https://github.com/DavidLandup0/deepvision/blob/main/deepvision/layers/efficient_attention.py)  # noqa: E501

        Args:
            project_dim: integer, the dimensionality of the projection
                of the `SegFormerMultiheadAttention` layer.
            num_heads: integer, the number of heads to use in the
                attention computation.
            sr_ratio: integer, the sequence reduction ratio to perform
                on the sequence before key and value projections.
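
        For example, with a 14x14 feature map flattened into a sequence of
        196 tokens and `sr_ratio=4`, keys and values are computed from a
        reduced sequence of roughly 4x4 = 16 tokens, while queries keep the
        full length of 196.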

        Basic usage:

        ```
        tensor = tf.random.uniform([1, 196, 32])
        output = keras_cv.layers.SegFormerMultiheadAttention(
            project_dim=32,
            num_heads=2,
            sr_ratio=4,
        )(tensor)
        print(output.shape)  # (1, 196, 32)
        ```
        """
        super().__init__()
        self.num_heads = num_heads
        self.sr_ratio = sr_ratio
        self.scale = (project_dim // num_heads) ** -0.5
        self.q = keras.layers.Dense(project_dim)
        self.k = keras.layers.Dense(project_dim)
        self.v = keras.layers.Dense(project_dim)
        self.proj = keras.layers.Dense(project_dim)

        if sr_ratio > 1:
            # Strided convolution used to shrink the spatial resolution of the
            # feature map before the key and value projections.
            self.sr = keras.layers.Conv2D(
                filters=project_dim,
                kernel_size=sr_ratio,
                strides=sr_ratio,
                padding="same",
            )
            self.norm = keras.layers.LayerNormalization()

    def call(self, x):
        input_shape = ops.shape(x)
        # The input is a flattened square feature map: (batch, H * W, channels).
        H, W = int(math.sqrt(input_shape[1])), int(math.sqrt(input_shape[1]))
        B, C = input_shape[0], input_shape[2]

        # Project queries and split them into heads:
        # (B, num_heads, seq_len, head_dim).
        q = self.q(x)
        q = ops.reshape(
            q,
            (
                input_shape[0],
                input_shape[1],
                self.num_heads,
                input_shape[2] // self.num_heads,
            ),
        )
        q = ops.transpose(q, [0, 2, 1, 3])
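
        # Sequence reduction: map the token sequence back to a 2D feature map,
        # downsample it with the strided convolution, and flatten it again, so
        # that keys and values are computed on a shorter sequence than the
        # queries.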
        if self.sr_ratio > 1:
            # Conv2D expects channels-last input, so reshape directly to
            # (B, H, W, C) and flatten back to (B, reduced_seq_len, C) after
            # the convolution.
            x = ops.reshape(x, (B, H, W, C))
            x = self.sr(x)
            x = ops.reshape(x, [input_shape[0], -1, input_shape[2]])
            x = self.norm(x)

        # Project keys and values (possibly on the reduced sequence) and split
        # them into heads: (B, num_heads, kv_seq_len, head_dim).
        k = self.k(x)
        v = self.v(x)

        k = ops.transpose(
            ops.reshape(
                k,
                [B, -1, self.num_heads, C // self.num_heads],
            ),
            [0, 2, 1, 3],
        )

        v = ops.transpose(
            ops.reshape(
                v,
                [B, -1, self.num_heads, C // self.num_heads],
            ),
            [0, 2, 1, 3],
        )

        # Scaled dot-product attention, followed by merging the heads back
        # into a single (B, seq_len, C) tensor.
        attn = (q @ ops.transpose(k, [0, 1, 3, 2])) * self.scale
        attn = ops.nn.softmax(attn, axis=-1)
        attn = attn @ v

        attn = ops.reshape(
            ops.transpose(attn, [0, 2, 1, 3]),
            [input_shape[0], input_shape[1], input_shape[2]],
        )

        x = self.proj(attn)
        return x
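

# A minimal usage sketch, assuming a 14x14 feature map flattened to 196
# tokens; `project_dim` is chosen to match the input channel count so the
# per-head reshapes line up.
if __name__ == "__main__":
    import numpy as np

    layer = SegFormerMultiheadAttention(project_dim=32, num_heads=2, sr_ratio=4)
    features = np.ones((1, 196, 32), dtype="float32")
    output = layer(features)
    print(output.shape)  # Expected: (1, 196, 32)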