Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 13 additions & 2 deletions .github/workflows/geti-inspect.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -39,10 +39,13 @@ jobs:
with:
version: "0.9.18"

- name: Install OpenCV dependencies
- name: Install system dependencies
run: |
sudo apt-get update
sudo apt-get install -y libgl1 libglib2.0-0
sudo apt-get install -y libgl1 libglib2.0-0 libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libswresample-dev libavfilter-dev pkg-config

- name: Install build tools
run: sudo apt-get install -y build-essential pkg-config

- name: Prepare venv and install Python dependencies
working-directory: application/backend
Expand Down Expand Up @@ -82,6 +85,14 @@ jobs:
with:
version: "0.9.18"

- name: Install system dependencies
run: |
sudo apt-get update
sudo apt-get install -y libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libswresample-dev libavfilter-dev pkg-config

- name: Install build tools
run: sudo apt-get install -y build-essential pkg-config

- name: Prepare venv and install Python dependencies
working-directory: application/backend
run: |
Expand Down
3 changes: 2 additions & 1 deletion application/backend/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,8 @@ dependencies = [
"openvino==2025.2.0",
"pydantic-settings~=2.10.1",
"aiortc~=1.13.0",
"anomalib[full]",
"anomalib[openvino]",
"tensorboard",
"torchmetrics>=1.8.2",
"opencv-python-headless~=4.12",
"watchdog==6.0.0",
Expand Down
11 changes: 7 additions & 4 deletions application/backend/src/entities/stream_data.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,14 @@
# Copyright (C) 2025 Intel Corporation
# Copyright (C) 2025-2026 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

from __future__ import annotations

from dataclasses import dataclass
from typing import Any
from typing import TYPE_CHECKING, Any

import numpy as np
from anomalib.data import NumpyImageBatch as PredictionResult
if TYPE_CHECKING:
import numpy as np
from anomalib.data import NumpyImageBatch as PredictionResult


@dataclass(kw_only=True)
Expand Down
12 changes: 8 additions & 4 deletions application/backend/src/services/dispatchers/base.py
Original file line number Diff line number Diff line change
@@ -1,18 +1,22 @@
# Copyright (C) 2025 Intel Corporation
# Copyright (C) 2025-2026 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations

import asyncio
import base64
import time
from abc import ABCMeta, abstractmethod
from datetime import datetime
from typing import Any
from typing import TYPE_CHECKING, Any

import cv2
import numpy as np
from anomalib.data import NumpyImageBatch as PredictionResult

from pydantic_models import OutputFormat, Sink

if TYPE_CHECKING:
import numpy as np
from anomalib.data import NumpyImageBatch as PredictionResult


def numpy_to_base64(image: np.ndarray, fmt: str = ".jpg") -> str:
Copy link

Copilot AI Jan 13, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The function uses np.ndarray as a type annotation while numpy is now imported only inside the TYPE_CHECKING block. Note, however, that this module also adds `from __future__ import annotations`, which makes all annotations lazy strings that are never evaluated at runtime — so this does not raise a NameError in normal execution. It would only break if something resolves the hints at runtime (e.g. via `typing.get_type_hints`); if that is a concern, import numpy unconditionally.

Copilot uses AI. Check for mistakes.
"""Convert a numpy array image to a base64 string."""
Expand Down
11 changes: 8 additions & 3 deletions application/backend/src/services/dispatchers/filesystem.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,22 @@
# Copyright (C) 2025 Intel Corporation
# Copyright (C) 2025-2026 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

from __future__ import annotations

import os
from datetime import datetime
from typing import TYPE_CHECKING

import cv2
import numpy as np
from anomalib.data import NumpyImageBatch as PredictionResult
from loguru import logger

from pydantic_models.sink import FolderSinkConfig, OutputFormat
from services.dispatchers.base import BaseDispatcher

if TYPE_CHECKING:
import numpy as np
from anomalib.data import NumpyImageBatch as PredictionResult


class FolderDispatcher(BaseDispatcher):
"""FolderDispatcher allows outputting to a folder in the local filesystem."""
Expand Down
20 changes: 12 additions & 8 deletions application/backend/src/services/dispatchers/mqtt.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
# Copyright (C) 2025 Intel Corporation
# Copyright (C) 2025-2026 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

from __future__ import annotations

import json
import threading
import time
from typing import Any
from typing import TYPE_CHECKING, Any

import numpy as np
from anomalib.data import NumpyImageBatch as PredictionResult
from loguru import logger

from pydantic_models.sink import MqttSinkConfig
Expand All @@ -18,6 +18,10 @@
except ImportError:
mqtt = None # type: ignore[assignment]

if TYPE_CHECKING:
import numpy as np
from anomalib.data import NumpyImageBatch as PredictionResult


MAX_RETRIES = 3
RETRY_DELAY = 1
Expand All @@ -28,7 +32,7 @@ class MqttDispatcher(BaseDispatcher):
def __init__(
self,
output_config: MqttSinkConfig,
mqtt_client: "mqtt.Client | None" = None,
mqtt_client: mqtt.Client | None = None,
track_messages: bool | None = False,
) -> None:
"""
Expand Down Expand Up @@ -61,7 +65,7 @@ def __init__(
self.client = mqtt_client or self._create_default_client()
self._connect()

def _create_default_client(self) -> "mqtt.Client":
def _create_default_client(self) -> mqtt.Client:
client_id = f"dispatcher_{int(time.time())}"
client = mqtt.Client(client_id=client_id)
client.on_connect = self._on_connect
Expand All @@ -86,15 +90,15 @@ def _connect(self) -> None:
time.sleep(RETRY_DELAY * (attempt + 1))
raise ConnectionError("Failed to connect to MQTT broker")

def _on_connect(self, _client: "mqtt.Client", _userdata: Any, _flags: dict[str, int], rc: int):
def _on_connect(self, _client: mqtt.Client, _userdata: Any, _flags: dict[str, int], rc: int):
    """Paho MQTT on_connect callback: record connection state and notify waiters.

    A return code of 0 means the broker accepted the connection; any other
    code is treated as a failure and logged.
    """
    if rc != 0:
        # Broker rejected the connection; leave _connected/_connection_event untouched.
        logger.error(f"MQTT connect failed with code {rc}")
        return
    self._connected = True
    self._connection_event.set()
    logger.info("Connected to MQTT broker")

def _on_disconnect(self, _client: "mqtt.Client", _userdata: Any, rc: int):
def _on_disconnect(self, _client: mqtt.Client, _userdata: Any, rc: int):
    """Paho MQTT on_disconnect callback: mark the client offline and block waiters."""
    # Clear the event so anything waiting on a live connection stops proceeding.
    self._connection_event.clear()
    self._connected = False
    logger.warning(f"MQTT disconnected (rc={rc})")
Expand Down
12 changes: 8 additions & 4 deletions application/backend/src/services/dispatchers/webhook.py
Original file line number Diff line number Diff line change
@@ -1,20 +1,24 @@
# Copyright (C) 2025 Intel Corporation
# Copyright (C) 2025-2026 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

"""This module contains the WebhookDispatcher class for dispatching images and predictions to a webhook endpoint."""

from typing import Any
from __future__ import annotations

from typing import TYPE_CHECKING, Any

import numpy as np
import requests
from anomalib.data import NumpyImageBatch as PredictionResult
from loguru import logger
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

from pydantic_models.sink import WebhookSinkConfig
from services.dispatchers.base import BaseDispatcher

if TYPE_CHECKING:
import numpy as np
from anomalib.data import NumpyImageBatch as PredictionResult

MAX_RETRIES = 3
BACKOFF_FACTOR = 0.3
RETRY_ON_STATUS = [500, 502, 503, 504]
Expand Down
7 changes: 3 additions & 4 deletions application/backend/src/services/training_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,10 +68,9 @@ async def _run_training_job(cls, job: Job, job_service: JobService) -> Model | N
project_id = job.project_id
model_name = job.payload.get("model_name")
device = job.payload.get("device")
snapshot_id_ = job.payload.get("dataset_snapshot_id")
snapshot_id = UUID(snapshot_id_) if snapshot_id_ else None
max_epochs = job.payload.get("max_epochs", 200)

snapshot_id = UUID(snapshot_id_str) if (snapshot_id_str := job.payload.get("dataset_snapshot_id")) else None
Copy link

Copilot AI Jan 13, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The variable name 'snapshot_id_str' is ambiguous. Consider renaming it to 'dataset_snapshot_id_str' to better reflect that it comes from the dataset_snapshot_id payload field.

Suggested change
snapshot_id = UUID(snapshot_id_str) if (snapshot_id_str := job.payload.get("dataset_snapshot_id")) else None
snapshot_id = UUID(dataset_snapshot_id_str) if (dataset_snapshot_id_str := job.payload.get("dataset_snapshot_id")) else None

Copilot uses AI. Check for mistakes.
# UI can return None
max_epochs: int = payload_epochs if (payload_epochs := job.payload.get("max_epochs")) is not None else 200
if model_name is None:
raise ValueError(f"Job {job.id} payload must contain 'model_name'")

Expand Down
Loading
Loading