Skip to content

Commit 3b16d19

Browse files
author
Cedric Carl-Franek Kränzle
committed
Merge remote-tracking branch 'origin/master' into fork/cedric-cfk/master
2 parents 169b2fe + cede0ae commit 3b16d19

File tree

10 files changed

+235
-56
lines changed

10 files changed

+235
-56
lines changed

docs/structure.py

Lines changed: 52 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -230,7 +230,15 @@
230230
image.calculate_image_center,
231231
image.get_affine_transform,
232232
image.get_scaling_factor,
233-
image.scale_resize
233+
image.scale_resize,
234+
image.compute_resizing_shape,
235+
image.pad_image,
236+
image.equalize_histogram,
237+
image.invert_colors,
238+
image.posterize,
239+
image.solarize,
240+
image.cutout,
241+
image.add_gaussian_noise,
234242
],
235243
},
236244

@@ -284,7 +292,8 @@
284292
standard.max_pooling_2d,
285293
standard.predict,
286294
standard.predict_with_nones,
287-
standard.weighted_average
295+
standard.weighted_average,
296+
standard.compute_common_row_indices,
288297
],
289298
},
290299

@@ -346,7 +355,15 @@
346355
{
347356
'page': 'models/pose_estimation.md',
348357
'functions': [
349-
models.HigherHRNet
358+
models.HigherHRNet,
359+
models.EfficientPosePhi0,
360+
models.EfficientPosePhi1,
361+
models.EfficientPosePhi2,
362+
models.EfficientPosePhi3,
363+
models.EfficientPosePhi4,
364+
models.EfficientPosePhi5,
365+
models.EfficientPosePhi6,
366+
models.EfficientPosePhi7,
350367
],
351368
},
352369

@@ -358,7 +375,10 @@
358375
models.layers.Conv2DNormalization,
359376
models.layers.SubtractScalar,
360377
models.layers.ExpectedValue2D,
361-
models.layers.ExpectedDepth
378+
models.layers.ExpectedDepth,
379+
models.layers.ReduceMean,
380+
models.layers.Sigmoid,
381+
models.layers.Add,
362382
],
363383
},
364384

@@ -451,7 +471,16 @@
451471
processors.FlipLeftRightImage,
452472
processors.DivideStandardDeviationImage,
453473
processors.ScaledResize,
454-
processors.BufferImages
474+
processors.BufferImages,
475+
processors.PadImage,
476+
processors.EqualizeHistogram,
477+
processors.InvertColors,
478+
processors.Posterize,
479+
processors.Solarize,
480+
processors.SharpenImage,
481+
processors.Cutout,
482+
processors.AddGaussianNoise,
483+
455484
]
456485
},
457486

@@ -580,7 +609,16 @@
580609
'classes': [
581610
processors.SolvePNP,
582611
processors.SolveChangingObjectPnPRANSAC,
583-
processors.Translation3DFromBoxWidth
612+
processors.Translation3DFromBoxWidth,
613+
processors.MatchPoses,
614+
processors.RotationMatrixToAxisAngle,
615+
processors.ConcatenatePoses,
616+
processors.ConcatenateScale,
617+
processors.AugmentPose6D,
618+
processors.ToPose6D,
619+
processors.BoxesWithOneHotVectorsToPose6D,
620+
processors.BoxesToPose6D,
621+
processors.BoxesWithClassArgToPose6D,
584622
]
585623
},
586624

@@ -632,7 +670,8 @@
632670
processors.PrintTopics,
633671
processors.FloatToBoolean,
634672
processors.NoneConverter,
635-
processors.AveragePredictions
673+
processors.AveragePredictions,
674+
processors.ComputeCommonRowIndices,
636675
]
637676
},
638677

@@ -731,7 +770,12 @@
731770
pipelines.HeadPoseKeypointNet2D32,
732771
pipelines.SingleInstancePIX2POSE6D,
733772
pipelines.MultiInstancePIX2POSE6D,
734-
pipelines.MultiInstanceMultiClassPIX2POSE6D
773+
pipelines.MultiInstanceMultiClassPIX2POSE6D,
774+
pipelines.AugmentColor,
775+
pipelines.AugmentEfficientPose,
776+
pipelines.EfficientDetPreprocess,
777+
pipelines.EfficientDetPostprocess,
778+
pipelines.EstimateEfficientPose,
735779
]
736780
},
737781

examples/hand_detection/model.py

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
from tensorflow.keras import Model
2+
from paz.models import SSD512
3+
from paz.models.detection.utils import create_multibox_head
4+
5+
6+
def SSD512Custom(num_classes, num_priors=(4, 6, 6, 6, 6, 4, 4), l2_loss=5e-4,
                 trainable_base=False):
    """Build an SSD512 detector with a custom multibox head.

    # Arguments
        num_classes: Int. Number of output classes (including background).
        num_priors: Sequence of 7 ints. Number of prior boxes per location
            for each of the seven prediction branches.
        l2_loss: Float. L2 regularization weight used by the multibox head.
        trainable_base: Boolean. If ``False`` the COCO-pretrained base
            model weights stay frozen.

    # Returns
        Tensorflow-Keras model with the base model's ``prior_boxes``
        attached as an attribute.
    """
    # NOTE: tuple default replaces the original mutable-list default
    # (shared across calls); callers passing a list still work.
    base_model = SSD512(base_weights='COCO', head_weights='COCO',
                        trainable_base=trainable_base)
    # Tap the seven prediction branches of the pretrained SSD512 backbone.
    branch_names = ['branch_1', 'branch_2', 'branch_3', 'branch_4',
                    'branch_5', 'branch_6', 'branch_7']
    branch_tensors = [base_model.get_layer(name).output
                      for name in branch_names]

    output_tensor = create_multibox_head(
        branch_tensors, num_classes, list(num_priors), l2_loss)
    model = Model(base_model.input, output_tensor, name='SSD512Custom')
    # Keep the anchor boxes so downstream postprocessing can decode outputs.
    model.prior_boxes = base_model.prior_boxes
    return model

examples/hand_detection/open_images.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,11 @@
1-
from paz import processors as pr
2-
from paz.abstract import Loader
3-
4-
51
import os
62
import glob
73
import csv
84
import numpy as np
5+
from paz import processors as pr
6+
from paz.abstract import Loader
7+
8+
root_path = os.path.expanduser('~')
99

1010

1111
class OpenImagesV6(Loader):
@@ -81,7 +81,7 @@ def load_data(self):
8181

8282
if __name__ == '__main__':
8383
root_path = os.path.expanduser('~')
84-
path = os.path.join(root_path, '/home/octavio/fiftyone/open-images-v6/')
84+
path = os.path.join(root_path, 'fiftyone/open-images-v6/')
8585

8686
train_data_manager = OpenImagesV6(
8787
path, pr.TRAIN, ['background', 'Human hand'])

examples/hand_detection/train.py

Lines changed: 5 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@
33
import argparse
44
from datetime import datetime
55

6-
# from tensorflow.keras.optimizers import SGD
76
from tensorflow.keras.optimizers import Adam
87
from tensorflow.keras.callbacks import (
98
CSVLogger, ModelCheckpoint, EarlyStopping, ReduceLROnPlateau)
@@ -14,7 +13,7 @@
1413
from paz.optimization import MultiBoxLoss
1514

1615
from open_images import OpenImagesV6
17-
from paz.models import SSD300
16+
from model import SSD512Custom
1817

1918
root_path = os.path.expanduser('~')
2019
DEFAULT_DATA_PATH = os.path.join(root_path, 'hand_dataset/hand_dataset/')
@@ -51,43 +50,20 @@
5150

5251

5352
# loading datasets
54-
"""
53+
path = os.path.join(root_path, 'fiftyone/open-images-v6/')
5554
data_managers, datasets = [], []
56-
for split in [pr.TRAIN, pr.VAL, pr.TEST]:
57-
data_manager = HandDataset(args.data_path, split)
58-
data = data_manager.load_data()
59-
data_managers.append(data_manager)
60-
datasets.append(data)
61-
62-
from egohand_dataset import EgoHands
63-
path = os.path.join(root_path, 'Downloads/egohands/_LABELLED_SAMPLES/')
64-
data_manager = EgoHands(path)
65-
ego_data = data_manager.load_data()
66-
datasets[0].extend(ego_data)
67-
"""
68-
69-
path = os.path.join(root_path, '/home/octavio/Datasets/fiftyone/open-images-v6/')
70-
data_managers, datasets = [], []
71-
for split in [pr.TRAIN, pr.VAL, pr.TEST]:
55+
for split in [pr.TRAIN, pr.VAL]:
7256
data_manager = OpenImagesV6(path, split, ['background', 'Human hand'])
7357
data = data_manager.load_data()
7458
data_managers.append(data_manager)
7559
datasets.append(data)
7660

77-
7861
# instantiating model
7962
num_classes = data_managers[0].num_classes
80-
from model import SSD512Custom
8163

8264
model = SSD512Custom(num_classes, trainable_base=True)
83-
"""
84-
model = SSD300(num_classes, base_weights='VOC', head_weights=None,
85-
trainable_base=True)
86-
model.load_weights('experiments/SSD300_RUN_00_10-06-2022_16-55-40/model_weights.hdf5')
87-
"""
8865
size = model.input_shape[1]
8966

90-
9167
# Instantiating loss and metrics
9268
# optimizer = SGD(args.learning_rate, args.momentum)
9369
optimizer = Adam(args.learning_rate, amsgrad=True)
@@ -127,7 +103,7 @@
127103
log = CSVLogger(os.path.join(experiment_path, 'optimization.log'))
128104
stop = EarlyStopping(patience=args.stop_patience, verbose=1)
129105
plateau = ReduceLROnPlateau(patience=args.reduce_patience, verbose=1)
130-
save_name = os.path.join(experiment_path, 'model_weights.hdf5')
106+
save_name = os.path.join(experiment_path, 'model.weights.h5')
131107
save = ModelCheckpoint(save_name, verbose=1, save_best_only=True,
132108
save_weights_only=True)
133109

@@ -137,6 +113,4 @@
137113
epochs=args.num_epochs,
138114
verbose=1,
139115
callbacks=[log, stop, plateau, save],
140-
validation_data=sequencers[1],
141-
use_multiprocessing=True,
142-
workers=6)
116+
validation_data=sequencers[1])

paz/models/classification/xception.py

Lines changed: 110 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,8 @@
33
from tensorflow.keras.layers import GlobalAveragePooling2D
44
from tensorflow.keras import Model
55
from tensorflow.keras.regularizers import l2
6-
from tensorflow.keras.models import load_model
76
from tensorflow.keras.utils import get_file
7+
from keras import layers
88

99

1010
URL = 'https://github.com/oarriaga/altamira-data/releases/download/v0.6/'
@@ -84,6 +84,112 @@ def build_xception(
8484
return model
8585

8686

87+
def build_minixception(input_shape, num_classes, l2_reg=0.01):
    """Instantiate a Mini-Xception model.

    # Arguments
        input_shape: List corresponding to the input shape
            of the model.
        num_classes: Integer. Number of output classes.
        l2_reg: Float. L2 regularization used
            in the convolutional kernels.

    # Returns
        Tensorflow-Keras model.
    """

    regularization = l2(l2_reg)

    # base: two plain convolutions before the residual modules
    img_input = Input(input_shape)
    x = Conv2D(5, (3, 3), strides=(1, 1), kernel_regularizer=regularization,
               use_bias=False)(img_input)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(8, (3, 3), strides=(1, 1), kernel_regularizer=regularization,
               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # four residual separable-conv modules; the first three halve the
    # spatial resolution, the last keeps it (matches original modules 1-4)
    x = _xception_module(x, 16, regularization, downsample=True)
    x = _xception_module(x, 32, regularization, downsample=True)
    x = _xception_module(x, 64, regularization, downsample=True)
    x = _xception_module(x, 128, regularization, downsample=False)

    # classification head: per-class feature maps pooled to logits
    x = Conv2D(num_classes, (3, 3), padding='same')(x)
    x = GlobalAveragePooling2D()(x)
    output = Activation('softmax', name='predictions')(x)

    model = Model(img_input, output)
    return model


def _xception_module(x, num_filters, regularization, downsample=True):
    """Residual module: two separable convolutions plus a 1x1 shortcut.

    When ``downsample`` is True the shortcut convolution uses stride 2 and
    the main path ends in a stride-2 max-pooling, halving the resolution;
    otherwise both paths preserve resolution.

    # Arguments
        x: Input tensor.
        num_filters: Integer. Filters for both separable convolutions
            and the shortcut convolution.
        regularization: Keras regularizer applied to depthwise kernels.
        downsample: Boolean. Halve the spatial resolution when True.

    # Returns
        Output tensor of the residual addition.
    """
    strides = (2, 2) if downsample else (1, 1)
    residual = Conv2D(num_filters, (1, 1), strides=strides,
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(num_filters, (3, 3), padding='same',
                        depthwise_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(num_filters, (3, 3), padding='same',
                        depthwise_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)

    if downsample:
        x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    return layers.add([x, residual])
191+
192+
87193
def MiniXception(input_shape, num_classes, weights=None):
88194
"""Build MiniXception (see references).
89195
@@ -101,9 +207,10 @@ def MiniXception(input_shape, num_classes, weights=None):
101207
Gender Classification](https://arxiv.org/abs/1710.07557)
102208
"""
103209
if weights == 'FER':
104-
filename = 'fer2013_mini_XCEPTION.119-0.65.hdf5'
210+
filename = 'fer2013_mini_XCEPTION.hdf5'
105211
path = get_file(filename, URL + filename, cache_subdir='paz/models')
106-
model = load_model(path)
212+
model = build_minixception(input_shape, num_classes)
213+
model.load_weights(path)
107214
else:
108215
stem_kernels = [32, 64]
109216
block_data = [128, 128, 256, 256, 512, 512, 1024]

0 commit comments

Comments
 (0)