-
Notifications
You must be signed in to change notification settings - Fork 330
Expand file tree
/
Copy pathsegformer_image_segmenter.py
More file actions
189 lines (160 loc) · 6.02 KB
/
segformer_image_segmenter.py
File metadata and controls
189 lines (160 loc) · 6.02 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
import keras
from keras_hub.src.api_export import keras_hub_export
from keras_hub.src.models.image_segmenter import ImageSegmenter
from keras_hub.src.models.segformer.segformer_backbone import SegFormerBackbone
from keras_hub.src.models.segformer.segformer_image_segmenter_preprocessor import ( # noqa: E501
SegFormerImageSegmenterPreprocessor,
)
@keras_hub_export("keras_hub.models.SegFormerImageSegmenter")
class SegFormerImageSegmenter(ImageSegmenter):
    """A Keras model implementing SegFormer for semantic segmentation.

    This class implements the segmentation head of the SegFormer architecture
    described in [SegFormer: Simple and Efficient Design for Semantic
    Segmentation with Transformers](https://arxiv.org/abs/2105.15203) and
    [based on the TensorFlow implementation from DeepVision]
    (https://github.com/DavidLandup0/deepvision/tree/main/deepvision/models/segmentation/segformer).

    SegFormers are meant to be used with the MixTransformer (MiT) encoder
    family, and use a very lightweight all-MLP decoder head.

    The MiT encoder uses a hierarchical transformer which outputs features at
    multiple scales, similar to that of the hierarchical outputs typically
    associated with CNNs.

    Args:
        backbone: `keras_hub.models.SegFormerBackbone`. The backbone network
            whose output features are projected into the final segmentation
            map. Its image encoder is *intended* to be a MiT backbone model
            (`keras_hub.models.MiTBackbone`), which was created specifically
            for SegFormers.
        num_classes: int. The number of classes for the segmentation model,
            including the background class.
        preprocessor: Optional. A `SegFormerImageSegmenterPreprocessor`
            instance applied to inputs before the model. Defaults to `None`
            (no preprocessing).
        dropout_rate: float. The dropout rate to apply before the
            segmentation head. Defaults to `0.1`.

    Example:

    Using presets:

    ```python
    segmenter = keras_hub.models.SegFormerImageSegmenter.from_preset(
        "segformer_b0_ade20k_512"
    )
    images = np.random.rand(1, 512, 512, 3)
    segmenter(images)
    ```

    Using the SegFormer backbone:

    ```python
    encoder = keras_hub.models.MiTBackbone.from_preset(
        "mit_b0_ade20k_512"
    )
    backbone = keras_hub.models.SegFormerBackbone(
        image_encoder=encoder,
        projection_filters=256,
    )
    ```

    Using the SegFormer backbone with a custom encoder:

    ```python
    images = np.ones(shape=(1, 96, 96, 3))
    labels = np.zeros(shape=(1, 96, 96, 1))

    encoder = keras_hub.models.MiTBackbone(
        depths=[2, 2, 2, 2],
        image_shape=(96, 96, 3),
        hidden_dims=[32, 64, 160, 256],
        num_layers=4,
        blockwise_num_heads=[1, 2, 5, 8],
        blockwise_sr_ratios=[8, 4, 2, 1],
        max_drop_path_rate=0.1,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
    )
    backbone = keras_hub.models.SegFormerBackbone(
        image_encoder=encoder,
        projection_filters=256,
    )
    segformer = keras_hub.models.SegFormerImageSegmenter(
        backbone=backbone,
        num_classes=4,
    )
    segformer(images)
    ```

    Using the segmenter class with a preset backbone:

    ```python
    image_encoder = keras_hub.models.MiTBackbone.from_preset(
        "mit_b0_ade20k_512"
    )
    backbone = keras_hub.models.SegFormerBackbone(
        image_encoder=image_encoder,
        projection_filters=256,
    )
    segformer = keras_hub.models.SegFormerImageSegmenter(
        backbone=backbone,
        num_classes=4,
    )
    ```
    """

    backbone_cls = SegFormerBackbone
    preprocessor_cls = SegFormerImageSegmenterPreprocessor

    def __init__(
        self,
        backbone,
        num_classes,
        preprocessor=None,
        dropout_rate=0.1,
        **kwargs,
    ):
        # Accept any Keras layer or model, per the error message below. The
        # original combined the two checks with `or`, which (because
        # `keras.Model` subclasses `keras.layers.Layer`) rejected plain
        # `Layer` instances that are not also `Model`s.
        if not isinstance(backbone, (keras.layers.Layer, keras.Model)):
            raise ValueError(
                "Argument `backbone` must be a `keras.layers.Layer` instance "
                f" or `keras.Model`. Received instead "
                f"backbone={backbone} (of type {type(backbone)})."
            )

        # === Layers ===
        inputs = backbone.input
        self.backbone = backbone
        self.preprocessor = preprocessor
        self.dropout = keras.layers.Dropout(dropout_rate)
        # 1x1 convolution projecting the backbone features to per-class
        # logits.
        self.output_segmentation_head = keras.layers.Conv2D(
            filters=num_classes, kernel_size=1, strides=1
        )
        # Upsample the logits back to the input's spatial resolution.
        self.resizing = keras.layers.Resizing(
            height=inputs.shape[1],
            width=inputs.shape[2],
            interpolation="bilinear",
        )

        # === Functional Model ===
        x = self.backbone(inputs)
        x = self.dropout(x)
        x = self.output_segmentation_head(x)
        output = self.resizing(x)

        super().__init__(
            inputs=inputs,
            outputs=output,
            **kwargs,
        )

        # === Config ===
        self.num_classes = num_classes
        self.dropout_rate = dropout_rate

    def get_config(self):
        """Return the serializable config, including the nested backbone."""
        config = super().get_config()
        config.update(
            {
                "num_classes": self.num_classes,
                "backbone": keras.saving.serialize_keras_object(self.backbone),
                "dropout_rate": self.dropout_rate,
            }
        )
        return config

    @classmethod
    def from_config(cls, config):
        """Reconstruct the segmenter, deserializing the nested backbone.

        `get_config` stores the model under the key "backbone"; the original
        looked up "image_encoder", which never appears in this class's
        config, so that branch was dead code and the backbone dict was never
        deserialized here.
        """
        if "backbone" in config and isinstance(config["backbone"], dict):
            config["backbone"] = keras.layers.deserialize(config["backbone"])
        return super().from_config(config)