This repository was archived by the owner on Mar 19, 2023. It is now read-only.

Commit 4c4d5d8

Merge pull request #193 from robmarkcole/fr-140
Fr 140
2 parents 5060ad1 + e5805b0 commit 4c4d5d8

File tree

2 files changed: +57 -32 lines


README.md

Lines changed: 8 additions & 5 deletions
@@ -27,16 +27,19 @@ image_processing:
     port: 80
     api_key: mysecretkey
     # custom_model: mask
+    # confidence: 80
     save_file_folder: /config/snapshots/
     save_timestamped_file: True
     # roi_x_min: 0.35
     roi_x_max: 0.8
     #roi_y_min: 0.4
     roi_y_max: 0.8
     targets:
-      - person
-      - vehicle
-      - dog
+      - target: person
+      - target: vehicle
+        confidence: 60
+      - target: car
+        confidence: 40
     source:
       - entity_id: camera.local_file
 ```
@@ -47,6 +50,7 @@ Configuration variables:
 - **api_key**: (Optional) Any API key you have set.
 - **timeout**: (Optional, default 10 seconds) The timeout for requests to deepstack.
 - **custom_model**: (Optional) The name of a custom model if you are using one. Don't forget to add the targets from the custom model below
+- **confidence**: (Optional) The confidence (in %) above which detected targets are counted in the sensor state. Default value: 80
 - **save_file_folder**: (Optional) The folder to save processed images to. Note that folder path should be added to [whitelist_external_dirs](https://www.home-assistant.io/docs/configuration/basic/)
 - **save_timestamped_file**: (Optional, default `False`, requires `save_file_folder` to be configured) Save the processed image with the time of detection in the filename.
 - **show_boxes**: (optional, default `True`), if `False` bounding boxes are not shown on saved images
@@ -55,8 +59,7 @@ Configuration variables:
 - **roi_y_min**: (optional, default 0), range 0-1, must be less than roi_y_max
 - **roi_y_max**: (optional, default 1), range 0-1, must be more than roi_y_min
 - **source**: Must be a camera.
-- **targets**: The list of target object names and/or `object_type`, default `person`.
-- **confidence**: (Optional) The confidence (in %) above which detected targets are counted in the sensor state. Default value: 80
+- **targets**: The list of target object names and/or `object_type`, default `person`. Optionally a `confidence` can be set for this target; if not set, the default confidence is used. Note the minimum possible confidence is 10%.
 
 For the ROI, the (x=0,y=0) position is the top left pixel of the image, and the (x=1,y=1) position is the bottom right pixel of the image. It might seem a bit odd to have y running from top to bottom of the image, but that is the coordinate system used by pillow.
 
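To make the new `targets` option concrete, here is a rough Python sketch of how the configuration above looks as plain data after validation, and how entries without their own `confidence` pick up the global default of 80. The variable names are illustrative, not the component's.

```python
# Illustrative only: the targets option as plain Python data.
# Before this change it was a flat list of names/types:
old_targets = ["person", "vehicle", "dog"]

# After this change it is a list of mappings, each with an optional
# per-target confidence (values match the README example above):
new_targets = [
    {"target": "person"},                     # no confidence -> global default
    {"target": "vehicle", "confidence": 60},
    {"target": "car", "confidence": 40},
]

# Entries without their own confidence fall back to the global value,
# mirroring what the integration does when it is set up.
DEFAULT_CONFIDENCE = 80
for target in new_targets:
    target.setdefault("confidence", DEFAULT_CONFIDENCE)

print(new_targets[0])  # {'target': 'person', 'confidence': 80}
```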
custom_components/deepstack_object/image_processing.py

Lines changed: 49 additions & 27 deletions
@@ -23,9 +23,11 @@
 from homeassistant.util.pil import draw_box
 from homeassistant.components.image_processing import (
     ATTR_CONFIDENCE,
+    CONF_CONFIDENCE,
     CONF_ENTITY_ID,
     CONF_NAME,
     CONF_SOURCE,
+    DEFAULT_CONFIDENCE,
     DOMAIN,
     PLATFORM_SCHEMA,
     ImageProcessingEntity,
@@ -60,8 +62,11 @@
 PERSON = "person"
 VEHICLE = "vehicle"
 VEHICLES = ["bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck"]
+OBJECT_TYPES = [ANIMAL, OTHER, PERSON, VEHICLE]
+
 
 CONF_API_KEY = "api_key"
+CONF_TARGET = "target"
 CONF_TARGETS = "targets"
 CONF_TIMEOUT = "timeout"
 CONF_SAVE_FILE_FOLDER = "save_file_folder"
@@ -75,7 +80,7 @@
 
 DATETIME_FORMAT = "%Y-%m-%d_%H-%M-%S"
 DEFAULT_API_KEY = ""
-DEFAULT_TARGETS = [PERSON]
+DEFAULT_TARGETS = [{CONF_TARGET: PERSON}]
 DEFAULT_TIMEOUT = 10
 DEFAULT_ROI_Y_MIN = 0.0
 DEFAULT_ROI_Y_MAX = 1.0
@@ -93,11 +98,19 @@
 FILE = "file"
 OBJECT = "object"
 SAVED_FILE = "saved_file"
+MIN_CONFIDENCE = 0.1
 
 # rgb(red, green, blue)
 RED = (255, 0, 0)  # For objects within the ROI
 GREEN = (0, 255, 0)  # For ROI box
-YELLOW = (255, 255, 0)  # For objects outside the ROI
+YELLOW = (255, 255, 0)  # Unused
+
+TARGETS_SCHEMA = {
+    vol.Required(CONF_TARGET): cv.string,
+    vol.Optional(CONF_CONFIDENCE): vol.All(
+        vol.Coerce(float), vol.Range(min=10, max=100)
+    ),
+}
 
 
 PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
@@ -108,7 +121,7 @@
         vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
         vol.Optional(CONF_CUSTOM_MODEL, default=""): cv.string,
         vol.Optional(CONF_TARGETS, default=DEFAULT_TARGETS): vol.All(
-            cv.ensure_list, [cv.string]
+            cv.ensure_list, [vol.Schema(TARGETS_SCHEMA)]
        ),
        vol.Optional(CONF_ROI_Y_MIN, default=DEFAULT_ROI_Y_MIN): cv.small_float,
        vol.Optional(CONF_ROI_X_MIN, default=DEFAULT_ROI_X_MIN): cv.small_float,
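For context, a minimal standalone sketch of how the new `TARGETS_SCHEMA` behaves under voluptuous, using plain `str` in place of Home Assistant's `cv.string` helper so it runs outside Home Assistant:

```python
import voluptuous as vol

# Standalone approximation of TARGETS_SCHEMA from the diff above.
TARGETS_SCHEMA = vol.Schema(
    {
        vol.Required("target"): str,
        vol.Optional("confidence"): vol.All(
            vol.Coerce(float), vol.Range(min=10, max=100)
        ),
    }
)

print(TARGETS_SCHEMA({"target": "car", "confidence": 40}))  # confidence coerced to 40.0
print(TARGETS_SCHEMA({"target": "person"}))                 # confidence left unset here

try:
    TARGETS_SCHEMA({"target": "car", "confidence": 5})      # below the 10% minimum
except vol.Invalid as exc:
    print(f"rejected: {exc}")
```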
@@ -196,7 +209,6 @@ def setup_platform(hass, config, add_devices, discovery_info=None):
     if save_file_folder:
         save_file_folder = Path(save_file_folder)
 
-    targets = [t.lower() for t in config[CONF_TARGETS]]  # ensure lower case
     entities = []
     for camera in config[CONF_SOURCE]:
         object_entity = ObjectClassifyEntity(
@@ -205,8 +217,8 @@ def setup_platform(hass, config, add_devices, discovery_info=None):
             api_key=config.get(CONF_API_KEY),
             timeout=config.get(CONF_TIMEOUT),
             custom_model=config.get(CONF_CUSTOM_MODEL),
-            targets=targets,
-            confidence=config.get(ATTR_CONFIDENCE),
+            targets=config.get(CONF_TARGETS),
+            confidence=config.get(CONF_CONFIDENCE),
             roi_y_min=config[CONF_ROI_Y_MIN],
             roi_x_min=config[CONF_ROI_X_MIN],
             roi_y_max=config[CONF_ROI_Y_MAX],
@@ -250,12 +262,18 @@ def __init__(
             port=port,
             api_key=api_key,
             timeout=timeout,
-            min_confidence=confidence / 100,
+            min_confidence=MIN_CONFIDENCE,
             custom_model=custom_model,
         )
         self._custom_model = custom_model
-        self._targets = targets
         self._confidence = confidence
+        self._targets = targets
+        for target in self._targets:
+            if CONF_CONFIDENCE not in target.keys():
+                target[CONF_CONFIDENCE] = self._confidence
+        self._targets_names = [
+            target[CONF_TARGET] for target in targets
+        ]  # can be a name or a type
         self._camera = camera_entity
         if name:
             self._name = name
@@ -266,7 +284,6 @@ def __init__(
         self._state = None
         self._objects = []  # The parsed raw data
         self._targets_found = []
-        self._summary = {}
 
         self._roi_dict = {
             "y_min": roi_y_min,
@@ -290,7 +307,6 @@ def process_image(self, image):
         self._state = None
         self._objects = []  # The parsed raw data
         self._targets_found = []
-        self._summary = {}
         saved_image_path = None
 
         try:
@@ -299,15 +315,23 @@
             _LOGGER.error("Deepstack error : %s", exc)
             return
 
-        self._summary = ds.get_objects_summary(predictions)
         self._objects = get_objects(predictions, self._image_width, self._image_height)
-        self._targets_found = [
-            obj
-            for obj in self._objects
-            if (obj["name"] or obj["object_type"] in self._targets)
-            and (obj["confidence"] > self._confidence)
-            and (object_in_roi(self._roi_dict, obj["centroid"]))
-        ]
+        self._targets_found = []
+
+        for obj in self._objects:
+            if obj["name"] or obj["object_type"] in self._targets_names:
+                ## Then check if the type has a configured confidence, if yes assign
+                ## Then if a confidence for a named object, this takes precedence over type confidence
+                confidence = self._confidence
+                for target in self._targets:
+                    if target[CONF_TARGET] == obj["object_type"]:
+                        confidence = target[CONF_CONFIDENCE]
+                for target in self._targets:
+                    if target[CONF_TARGET] == obj["name"]:
+                        confidence = target[CONF_CONFIDENCE]
+                if obj["confidence"] > confidence:
+                    if object_in_roi(self._roi_dict, obj["centroid"]):
+                        self._targets_found.append(obj)
 
         self._state = len(self._targets_found)
         if self._state > 0:
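Pulled out of the component, the per-object threshold lookup above works roughly like the sketch below; the helper name and the sample detection are made up for illustration. A confidence configured for an `object_type` applies first, and a confidence configured for a specific object name then overrides it.

```python
# Hypothetical standalone version of the threshold lookup in process_image.
DEFAULT_CONFIDENCE = 80

targets = [
    {"target": "person", "confidence": DEFAULT_CONFIDENCE},
    {"target": "vehicle", "confidence": 60},   # object_type level
    {"target": "car", "confidence": 40},       # object name level
]

def effective_confidence(obj, targets, default=DEFAULT_CONFIDENCE):
    """Type-level confidence first, then a name-level confidence overrides it."""
    confidence = default
    for target in targets:
        if target["target"] == obj["object_type"]:
            confidence = target["confidence"]
    for target in targets:
        if target["target"] == obj["name"]:
            confidence = target["confidence"]
    return confidence

detection = {"name": "car", "object_type": "vehicle", "confidence": 55}
threshold = effective_confidence(detection, targets)
print(threshold, detection["confidence"] > threshold)  # 40 True -> counted (if inside the ROI)
```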
@@ -350,19 +374,17 @@ def should_poll(self):
     def device_state_attributes(self) -> Dict:
         """Return device specific state attributes."""
         attr = {}
-        for target in self._targets:
-            attr[f"ROI {target} count"] = len(
-                [t for t in self._targets_found if t["name"] == target]
-            )
-            attr[f"ALL {target} count"] = len(
-                [t for t in self._objects if t["name"] == target]
-            )
+        attr["targets"] = self._targets
+        attr["targets_found"] = [
+            {obj["name"]: obj["confidence"]} for obj in self._targets_found
+        ]
         if self._last_detection:
             attr["last_target_detection"] = self._last_detection
         if self._custom_model:
             attr["custom_model"] = self._custom_model
-        attr["targets"] = self._targets
-        attr["summary"] = self._summary
+        attr["all_objects"] = [
+            {obj["name"]: obj["confidence"]} for obj in self._objects
+        ]
         if self._save_file_folder:
             attr[CONF_SAVE_FILE_FOLDER] = str(self._save_file_folder)
         if self._save_timestamped_file:
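With the attribute changes above, the entity's state attributes end up shaped roughly like this. The keys follow the code; the values are made up for illustration, and the optional keys only appear when the corresponding options are configured or a detection has occurred.

```python
# Example attribute payload (illustrative values only).
attributes = {
    "targets": [
        {"target": "person", "confidence": 80},
        {"target": "car", "confidence": 40},
    ],
    "targets_found": [{"car": 55.2}],                # name -> detection confidence
    "last_target_detection": "2020-01-01_12-00-00",  # DATETIME_FORMAT
    "all_objects": [{"car": 55.2}, {"dog": 32.1}],
    "save_file_folder": "/config/snapshots/",        # only if configured
}
```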
