Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions config-jetson.ini
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ MinScore: 0.25
[PostProcessor]
MaxTrackFrame: 5
NMSThreshold: 0.98
CalibrationFrames: 100
; distance threshold for smart distancing in (cm)
DistThreshold: 150
; distance measurement method, CenterPointsDistance: compare the centers of pedestrian boxes together, FourCornerPointsDistance: compare four corresponding points of the pedestrian boxes and take the minimum of them.
Expand Down
1 change: 1 addition & 0 deletions config-skeleton.ini
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ MinScore: 0.25
[PostProcessor]
MaxTrackFrame: 5
NMSThreshold: 0.98
CalibrationFrames: 100
; distance threshold for smart distancing in (cm)
DistThreshold: 150
; distance measurement method, CenterPointsDistance: compare the centers of pedestrian boxes together, FourCornerPointsDistance: compare four corresponding points of the pedestrian boxes and take the minimum of them.
Expand Down
1 change: 1 addition & 0 deletions config-x86-openvino.ini
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ MinScore: 0.25
[PostProcessor]
MaxTrackFrame: 5
NMSThreshold: 0.98
CalibrationFrames: 100
; distance threshold for smart distancing in (cm)
DistThreshold: 150
DistMethod: CenterPointsDistance
Expand Down
1 change: 1 addition & 0 deletions config-x86.ini
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ MinScore: 0.25
[PostProcessor]
MaxTrackFrame: 5
NMSThreshold: 0.98
CalibrationFrames: 100
; distance threshold for smart distancing in (cm)
DistThreshold: 150
DistMethod: CenterPointsDistance
Expand Down
106 changes: 104 additions & 2 deletions libs/core.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import copy
import cv2 as cv
import numpy as np
import math
Expand Down Expand Up @@ -43,6 +44,10 @@ def __init__(self, config):
self.dist_method = self.config.get_section_dict("PostProcessor")["DistMethod"]
self.dist_threshold = self.config.get_section_dict("PostProcessor")["DistThreshold"]
self.resolution = tuple([int(i) for i in self.config.get_section_dict('App')['Resolution'].split(',')])
self.calibration_frames = int(self.config.get_section_dict("PostProcessor")["CalibrationFrames"])
self.box_priors = WelfordBoxDist(*self.resolution)
self.background_subtractor = cv.createBackgroundSubtractorMOG2()


def __process(self, cv_image):
"""
Expand All @@ -56,7 +61,9 @@ def __process(self, cv_image):
resized_image = cv.resize(cv_image, tuple(self.image_size[:2]))
rgb_resized_image = cv.cvtColor(resized_image, cv.COLOR_BGR2RGB)
tmp_objects_list = self.detector.inference(rgb_resized_image)
[w,h] = self.resolution
self.foreground_mask = self.background_subtractor.apply(cv_image)
self.foreground_mask = cv.threshold(self.foreground_mask, 128, 255, cv.THRESH_BINARY)[1] / 255
[w, h] = self.resolution

for obj in tmp_objects_list:
box = obj["bbox"]
Expand Down Expand Up @@ -159,6 +166,8 @@ def calculate_distancing(self, objects_list):
1. omitting large boxes by filtering boxes which are bigger than 1/4 of the size of the image.
2. omitting duplicated boxes by applying an auxiliary non-maximum-suppression.
3. apply a simple object tracker to make the detection more robust.
4. filter bounding boxes based on boxes prior distribution.
5. filter bounding boxes based on the portion of foreground pixels inside the box.

params:
object_list: a list of dictionaries. each dictionary has attributes of a detected object such as
Expand All @@ -174,6 +183,7 @@ def calculate_distancing(self, objects_list):
new_objects_list = self.non_max_suppression_fast(new_objects_list,
float(self.config.get_section_dict("PostProcessor")[
"NMSThreshold"]))
new_objects_list = self.filter_boxes(new_objects_list)
tracked_boxes = self.tracker.update(new_objects_list)
new_objects_list = [tracked_boxes[i] for i in tracked_boxes.keys()]
for i, item in enumerate(new_objects_list):
Expand All @@ -184,6 +194,58 @@ def calculate_distancing(self, objects_list):

return new_objects_list, distances

def filter_boxes(self, objects_list):
    """
    Filter bounding boxes using the learned box-size priors and the portion of
    foreground pixels inside each box.

    During the calibration phase — the first ``self.calibration_frames``
    frames — the per-pixel mean/std of box width and height is accumulated.
    After calibration, the learned statistics are used to drop boxes whose
    size is implausible for their centroid location, as well as boxes that
    contain almost no foreground pixels.

    params:
        objects_list: a list of dictionaries. each dictionary has attributes of
        a detected object such as "id", "centroidReal" (a tuple of the centroid
        coordinates (cx, cy, w, h) of the box) and "bboxReal" (a tuple of the
        (xmin, ymin, xmax, ymax) coordinates of the box)

    returns:
        objects_list: a filtered version of the input list

    """
    if self.box_priors.num_calls < self.calibration_frames:
        # Calibration phase: only accumulate statistics, keep every box.
        self.box_priors.update(objects_list)
        return objects_list

    if self.box_priors.num_calls == self.calibration_frames:
        # Finalize the statistics. compute_stats() bumps num_calls past the
        # calibration budget, so this branch runs exactly once.
        self.box_priors.compute_stats()

    # Iterate over a shallow copy because we remove items from objects_list.
    for box in copy.copy(objects_list):
        cx, cy, w, h = [int(i) for i in box["centroidReal"]]
        # Clamp the centroid inside the frame. The lower clamp also guards
        # against negative coordinates, which would otherwise index the
        # prior arrays from the end (numpy negative-index wraparound).
        cx = min(max(cx, 0), self.resolution[0] - 1)
        cy = min(max(cy, 0), self.resolution[1] - 1)
        # Three-sigma rule against the learned width/height priors, applied
        # only where enough boxes (> 10) were observed during calibration.
        mean = self.box_priors.mean
        std = self.box_priors.std
        condition_w = (mean[0, cy, cx] - 3 * std[0, cy, cx]
                       < w <
                       mean[0, cy, cx] + 3 * std[0, cy, cx])
        condition_h = (mean[1, cy, cx] - 3 * std[1, cy, cx]
                       < h <
                       mean[1, cy, cx] + 3 * std[1, cy, cx])
        if (not (condition_w and condition_h)) and self.box_priors.count[cy, cx] > 10:
            objects_list.remove(box)
            continue
        # ========================================================================================== #
        # filtering based on the portion of foreground pixels inside the box
        # ========================================================================================== #
        x_min = max(0, int(box["bboxReal"][0]))
        y_min = max(0, int(box["bboxReal"][1]))
        x_max = min(self.resolution[0] - 1, int(box["bboxReal"][2]))
        y_max = min(self.resolution[1] - 1, int(box["bboxReal"][3]))
        bbox_mask_window = self.foreground_mask[y_min:y_max, x_min:x_max]
        # Guard against degenerate (zero-area) windows: the previous code
        # divided by bbox_mask_window.size and raised ZeroDivisionError.
        # A zero-area box carries no evidence of a pedestrian, so drop it.
        if bbox_mask_window.size == 0:
            objects_list.remove(box)
            continue
        foreground_portion = bbox_mask_window.sum() / bbox_mask_window.size
        if foreground_portion < .1:
            objects_list.remove(box)
    return objects_list




@staticmethod
def ignore_large_boxes(object_list):

Expand Down Expand Up @@ -341,4 +403,44 @@ def calculate_box_distances(self, nn_out):
return distances_asarray



class WelfordBoxDist:
    """
    Recursively estimates the mean, variance and standard deviation of the
    width and height of the bounding boxes observed at each centroid pixel,
    using Welford's online algorithm.
    params:
        img_width: width of the desired output frame
        img_height: height of the desired output frame
    """
    def __init__(self, img_width, img_height):
        # Channel 0 tracks box widths, channel 1 tracks box heights.
        self.mean = np.zeros((2, img_height, img_width))
        # Welford running sum of squared deviations. Initialized to ones
        # (instead of zeros) so pixels with very few observations never end
        # up with a zero standard deviation.
        self.m2 = np.ones((2, img_height, img_width))
        # Number of boxes observed at each centroid pixel.
        self.count = np.zeros((img_height, img_width), dtype=int)
        # Number of update()/compute_stats() invocations; the caller compares
        # it against the calibration-frame budget.
        self.num_calls = 0
        self.img_width = img_width
        self.img_height = img_height

    def update(self, new_objects):
        """
        Update the running mean and the sum of squared deviations
        (sum((x_i - mean) ** 2)) with the newly received bounding boxes,
        following Welford's algorithm.
        Args: new_objects: a list of dictionaries. each dictionary has attributes of a detected object such as
        "id", "centroidReal" (a tuple of the centroid coordinates (cx,cy,w,h) of the box) and "bboxReal" (a tuple
        of the (xmin,ymin,xmax,ymax) coordinate of the box)
        """
        self.num_calls += 1
        for item in new_objects:
            cx, cy, w, h = [int(i) for i in item["centroidReal"]]
            # Clamp the centroid inside the frame. The lower clamp also
            # guards against negative coordinates, which would silently
            # update the statistics of a wrong pixel via numpy's
            # negative-index wraparound.
            cx = min(max(cx, 0), self.img_width - 1)
            cy = min(max(cy, 0), self.img_height - 1)
            self.count[cy, cx] += 1
            offset_old = [w, h] - self.mean[:, cy, cx]
            self.mean[:, cy, cx] += offset_old / self.count[cy, cx]
            offset_new = [w, h] - self.mean[:, cy, cx]
            self.m2[:, cy, cx] += offset_old * offset_new

    def compute_stats(self):
        """Calculate the final (population) variance and standard deviation."""
        # Bump num_calls past the calibration budget so the caller invokes
        # this method exactly once after calibration ends.
        self.num_calls += 1
        # Substitute 1 for zero counts to avoid dividing by zero for pixels
        # that never saw a box (their m2 is still the initial ones).
        self.var = self.m2 / np.where(self.count != 0, self.count, 1.0)
        self.std = np.sqrt(self.var)