forked from keras-team/keras-hub
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path segformer_backbone.py
More file actions
163 lines (133 loc) · 5.53 KB
/
segformer_backbone.py
File metadata and controls
163 lines (133 loc) · 5.53 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
import keras
from keras_hub.src.api_export import keras_hub_export
from keras_hub.src.models.backbone import Backbone
@keras_hub_export("keras_hub.models.SegFormerBackbone")
class SegFormerBackbone(Backbone):
    """A Keras model implementing the SegFormer architecture for semantic segmentation.

    This class implements the majority of the SegFormer architecture described in
    [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers]
    (https://arxiv.org/abs/2105.15203) and [based on the TensorFlow implementation from DeepVision]
    (https://github.com/DavidLandup0/deepvision/tree/main/deepvision/models/segmentation/segformer).

    SegFormers are meant to be used with the MixTransformer (MiT) encoder
    family, and use a very lightweight all-MLP decoder head.

    The MiT encoder uses a hierarchical transformer which outputs features at
    multiple scales, similar to that of the hierarchical outputs typically
    associated with CNNs.

    Args:
        image_encoder: `keras.Model`. The backbone network for the model that is
            used as a feature extractor for the SegFormer encoder.
            Should be used with the MiT backbone model
            (`keras_hub.models.MiTBackbone`) which was created
            specifically for SegFormers. It must expose a `pyramid_outputs`
            dict of multi-scale feature maps and a `hidden_dims` list of
            per-level channel counts.
        projection_filters: int, number of filters in the
            convolution layer projecting the concatenated features into
            a segmentation map. Defaults to `256`.

    Example:
    Using the class with a custom `backbone`:

    ```python
    import keras_hub

    backbone = keras_hub.models.MiTBackbone(
        depths=[2, 2, 2, 2],
        image_shape=(224, 224, 3),
        hidden_dims=[32, 64, 160, 256],
        num_layers=4,
        blockwise_num_heads=[1, 2, 5, 8],
        blockwise_sr_ratios=[8, 4, 2, 1],
        max_drop_path_rate=0.1,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
    )

    segformer_backbone = keras_hub.models.SegFormerBackbone(
        image_encoder=backbone, projection_filters=256
    )
    ```

    Using the class with a preset `backbone`:

    ```python
    import keras_hub

    backbone = keras_hub.models.MiTBackbone.from_preset("mit_b0_ade20k_512")
    segformer_backbone = keras_hub.models.SegFormerBackbone(
        image_encoder=backbone, projection_filters=256
    )
    ```
    """

    def __init__(
        self,
        image_encoder,
        projection_filters=256,
        **kwargs,
    ):
        # Accept any `keras.layers.Layer` or `keras.Model` (a `Model` is
        # itself a `Layer`). NOTE: the previous check joined the two negated
        # isinstance tests with `or`, which raised for every plain `Layer`
        # that was not also a `Model` — the opposite of the error message.
        if not isinstance(image_encoder, (keras.layers.Layer, keras.Model)):
            raise ValueError(
                "Argument `image_encoder` must be a `keras.layers.Layer` instance "
                f" or `keras.Model`. Received instead "
                f"image_encoder={image_encoder} (of type {type(image_encoder)})."
            )

        # === Layers ===
        inputs = keras.layers.Input(shape=image_encoder.input.shape[1:])

        # Re-wire the encoder so calling it yields the multi-scale pyramid
        # outputs (a dict keyed "P1", "P2", ...) instead of its final output.
        self.feature_extractor = keras.Model(
            image_encoder.inputs, image_encoder.pyramid_outputs
        )

        features = self.feature_extractor(inputs)
        # Get height and width of level one output; all other levels are
        # resized up to this resolution before fusion.
        _, height, width, _ = features["P1"].shape

        # One dense projection per pyramid level, mapping each level's
        # channel count onto the common `projection_filters` width.
        # `zip` deliberately truncates to the shorter of the two sequences,
        # matching the pairing used when the projections are applied below.
        self.mlp_blocks = []
        for feature_dim, _ in zip(image_encoder.hidden_dims, features):
            self.mlp_blocks.append(
                keras.layers.Dense(
                    projection_filters, name=f"linear_{feature_dim}"
                )
            )

        self.resizing = keras.layers.Resizing(
            height, width, interpolation="bilinear"
        )
        self.concat = keras.layers.Concatenate(axis=-1)
        self.linear_fuse = keras.Sequential(
            [
                keras.layers.Conv2D(
                    filters=projection_filters, kernel_size=1, use_bias=False
                ),
                keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9),
                keras.layers.Activation("relu"),
            ]
        )

        # === Functional Model ===
        # Project all multi-level outputs onto
        # the same dimensionality and feature map shape
        multi_layer_outs = []
        for mlp_block, key in zip(self.mlp_blocks, features):
            out = mlp_block(features[key])
            out = self.resizing(out)
            multi_layer_outs.append(out)

        # Concat now-equal feature maps, in reverse (deepest-first) order.
        concatenated_outs = self.concat(multi_layer_outs[::-1])

        # Fuse concatenated features into a segmentation map
        seg = self.linear_fuse(concatenated_outs)

        super().__init__(
            inputs=inputs,
            outputs=seg,
            **kwargs,
        )

        # === Config ===
        self.projection_filters = projection_filters
        self.image_encoder = image_encoder

    def get_config(self):
        """Return the config dict, serializing the nested `image_encoder`."""
        config = super().get_config()
        config.update(
            {
                "projection_filters": self.projection_filters,
                "image_encoder": keras.saving.serialize_keras_object(
                    self.image_encoder
                ),
            }
        )
        return config

    @classmethod
    def from_config(cls, config):
        """Rebuild the backbone, deserializing `image_encoder` if needed."""
        if "image_encoder" in config and isinstance(
            config["image_encoder"], dict
        ):
            config["image_encoder"] = keras.layers.deserialize(
                config["image_encoder"]
            )
        return super().from_config(config)