diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index c41249c..a6bd985 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -18,10 +18,10 @@ jobs:
id-token: write
actions: read
steps:
- - name: Clone repository
+      - name: 📥 Clone repository
uses: actions/checkout@v4
- - name: Install uv and set the python version
+      - name: 🐍 Install uv and set the python version
uses: astral-sh/setup-uv@v5
with:
version: "0.5.24"
@@ -29,19 +29,18 @@ jobs:
cache-dependency-glob: "uv.lock"
python-version: 3.12
-
- - name: Install dependencies
+      - name: 📦 Install dependencies
run: |
uv sync --only-group docs
- - name: Build MkDocs site
+      - name: 🏗️ Build MkDocs site
run: |
mkdocs build
- - name: Setup Pages
+      - name: 🔧 Setup Pages
uses: actions/configure-pages@v3
- - name: Upload artifact
+      - name: 📤 Upload artifact
uses: actions/upload-pages-artifact@v3
with:
path: ./site
@@ -61,6 +60,6 @@ jobs:
url: ${{ steps.deployment.outputs.page_url }}
steps:
- - name: Deploy to GitHub Pages
+      - name: 🚀 Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v4
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 394eac7..aafcf98 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -5,12 +5,16 @@ on:
branches: [ main ]
paths-ignore:
- '**.md'
+ - '*.md'
+ - '**.js'
+ - '**.html'
+ - '**.css'
- 'docs/**'
- 'LICENSE'
- '.gitignore'
- '.pre-commit-config.yaml'
- 'mkdocs.yml'
- - 'docs_requirements.txt'
+ - 'CHANGELOG.md'
jobs:
test:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 71c801c..4a6e54f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,10 @@
# Changelog
+## [v0.0.1-rc1] - 2025-02-10
+**Release Candidate with Full Feature Set and Documentation**
+- Add docs page build with mkdocs-material. Read more [https://agfianf.github.io/color-correction/](https://agfianf.github.io/color-correction/)
+- Complete docstrings for all classes and methods
+
## [v0.0.1b3] - 2025-02-06
**Add Analyzer Report and Bug Fixes**
diff --git a/Makefile b/Makefile
index 3a6341f..8eccc6f 100644
--- a/Makefile
+++ b/Makefile
@@ -19,3 +19,12 @@ log:
update-uv-lock:
uv lock
+
+list-installed:
+ uv pip list
+
+sync-docs:
+	uv sync --only-group=docs --only-group=dev
+
+sync-all:
+ uv sync --all-groups --no-group dev-model
diff --git a/color_correction/core/correction/polynomial.py b/color_correction/core/correction/polynomial.py
index 522bfca..0e0a997 100644
--- a/color_correction/core/correction/polynomial.py
+++ b/color_correction/core/correction/polynomial.py
@@ -20,8 +20,9 @@ class Polynomial(BaseComputeCorrection):
----------
**kwargs : dict, optional
Keyword arguments. Recognized keyword:
- degree : int, optional, default 2
- Degree of the polynomial.
+
+ - `degree` : int, optional, default 2
+ Degree of the polynomial.
"""
def __init__(self, **kwargs: dict) -> None:
@@ -60,17 +61,15 @@ def fit(
Reference image patches.
**kwargs : dict
Additional keyword arguments. Recognized keyword:
- degree : int, optional
- Degree of the polynomial.
+
+ - `degree` : int, optional
+ Degree of the polynomial.
Returns
-------
np.ndarray
Fitted model pipeline.
- Notes
- -----
- The execution time for model fitting is printed.
"""
start_time = time.perf_counter()
degree = kwargs.get("degree", self.degree)
diff --git a/color_correction/schemas/__init__.py b/color_correction/schemas/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/color_correction/schemas/custom_types.py b/color_correction/schemas/custom_types.py
new file mode 100644
index 0000000..5b52219
--- /dev/null
+++ b/color_correction/schemas/custom_types.py
@@ -0,0 +1,70 @@
+"""
+Images Schema Module
+--------------------
+This module defines type annotations for image processing.
+
+Attributes
+----------
+ColorPatchType : numpy.typing.NDArray[np.uint8]
+ Represents a color patch extracted from an image. Usually a mean of a region patch.
+
+ Example
+ -------
+ ```python
+ np.array(
+ [
+ [68, 82, 115], # 1. Dark skin
+ [128, 149, 195], # 2. Light skin
+ [157, 123, 93], # 3. Blue sky
+ [65, 108, 91], # 4. Foliage
+ [175, 129, 130], # 5. Blue flower
+ [171, 191, 99], # 6. Bluish green
+ [46, 123, 220], # 7. Orange
+ [168, 92, 72], # 8. Purplish blue
+ [97, 84, 194], # 9. Moderate red
+ [104, 59, 91], # 10. Purple
+ [62, 189, 161], # 11. Yellow green
+ [40, 161, 229], # 12. Orange yellow
+ [147, 63, 42], # 13. Blue
+ [72, 149, 72], # 14. Green
+ [57, 50, 175], # 15. Red
+ [22, 200, 238], # 16. Yellow
+ [150, 84, 188], # 17. Magenta
+ [166, 137, 0], # 18. Cyan
+ [240, 245, 245], # 19. White 9.5
+ [201, 202, 201], # 20. Neutral 8
+ [162, 162, 161], # 21. Neutral 6.5
+ [121, 121, 120], # 22. Neutral 5
+ [85, 85, 83], # 23. Neutral 3.5
+ [51, 50, 50], # 24. Black 2
+ ],
+ )
+ ```
+ImageType : numpy.typing.NDArray[np.uint8]
+ Represents an image 3D array with shape (H, W, C) in uint8 format.
+ImageBGR : numpy.typing.NDArray[np.uint8]
+ Represents an image in BGR format (OpenCV default).
+ImageRGB : numpy.typing.NDArray[np.uint8]
+ Represents an image in RGB format.
+
+"""
+
+from typing import Literal
+
+import numpy as np
+from numpy.typing import NDArray
+
+LiteralModelCorrection = Literal[
+ "least_squares",
+ "polynomial",
+ "linear_reg",
+ "affine_reg",
+]
+
+LiteralModelDetection = Literal["yolov8"]
+
+ColorPatchType = NDArray[np.uint8]
+ImageType = NDArray[np.uint8]
+ImageBGR = NDArray[np.uint8]
+ImageRGB = NDArray[np.uint8]
+ImageGray = NDArray[np.uint8]
diff --git a/color_correction/schemas/det_yv8.py b/color_correction/schemas/det_yv8.py
index 6d4a3f7..17db913 100644
--- a/color_correction/schemas/det_yv8.py
+++ b/color_correction/schemas/det_yv8.py
@@ -1,3 +1,10 @@
+"""
+Module for detection result schema using Pydantic.
+
+Provides the DetectionResult model that contains detection boxes, scores, and class ids,
+and a helper method to draw these detections on an image.
+"""
+
import numpy as np
from pydantic import BaseModel
@@ -7,6 +14,38 @@
class DetectionResult(BaseModel):
+ """
+ Detection result model for YOLOv8 card and color patches detection.
+
+ A data model that encapsulates YOLOv8 detection results for a standardized color
+ card and its color patches. The model handles two distinct classes:
+ patches (label 0) and card (label 1). In a typical detection scenario,
+ the model captures one color calibration card and 24 color patches.
+
+
+ Notes
+ -----
+ The detection typically yields 25 objects:
+
+ - 1 calibration card (class_id: 1)
+ - 24 color patches (class_id: 0)
+
+ Attributes
+ ----------
+    boxes : list[tuple[int, int, int, int]]
+        List of bounding boxes as (x1, y1, x2, y2).
+        Representing the top-left and bottom-right corners of the detection.
+    scores : list[float]
+        List of confidence scores for each detection.
+    class_ids : list[int]
+        List of class IDs corresponding to each detection,
+        where:
+
+        - 0: represents color patches
+        - 1: represents the calibration card
+
+ """
+
boxes: list[BoundingBox]
scores: list[float]
class_ids: list[int]
@@ -15,5 +54,17 @@ def draw_detections(
self,
image: np.ndarray,
) -> np.ndarray:
- """Draw detection boxes on image."""
+ """
+ Draw detection boxes on the provided image.
+
+ Parameters
+ ----------
+ image : numpy.ndarray
+ The image on which the detection boxes will be drawn.
+
+ Returns
+ -------
+ numpy.ndarray
+ The image with the drawn detection boxes.
+ """
return draw_detections(image, self.boxes, self.scores, self.class_ids)
diff --git a/color_correction/schemas/device.py b/color_correction/schemas/device.py
index 2df25a9..0ec9540 100644
--- a/color_correction/schemas/device.py
+++ b/color_correction/schemas/device.py
@@ -19,7 +19,20 @@ class CPUArchitecture(str, Enum):
class DeviceSpecs(BaseModel):
- """Device specifications schema."""
+ """
+ Device specifications schema.
+
+ Attributes
+ ----------
+ os_name : str
+ Operating system name.
+ cpu_arch : CPUArchitecture
+ CPU architecture.
+ gpu_type : GPUType
+ GPU type.
+ is_apple_silicon : bool
+ Whether the device is Apple Silicon.
+ """
os_name: str = Field(..., description="Operating system name")
cpu_arch: CPUArchitecture = Field(
diff --git a/color_correction/schemas/images.py b/color_correction/schemas/images.py
deleted file mode 100644
index 040cde5..0000000
--- a/color_correction/schemas/images.py
+++ /dev/null
@@ -1,5 +0,0 @@
-import numpy as np
-from numpy.typing import NDArray
-
-ColorPatchType = NDArray[np.uint8]
-ImageType = NDArray[np.uint8]
diff --git a/color_correction/services/color_correction.py b/color_correction/services/color_correction.py
index 0f12cb8..fedf51f 100644
--- a/color_correction/services/color_correction.py
+++ b/color_correction/services/color_correction.py
@@ -2,19 +2,19 @@
import cv2
import numpy as np
-from numpy.typing import NDArray
from color_correction.constant.color_checker import reference_color_d50_bgr
-from color_correction.constant.methods import (
- LiteralModelCorrection,
- LiteralModelDetection,
-)
from color_correction.core.card_detection.det_yv8_onnx import (
YOLOv8CardDetector,
)
from color_correction.core.correction import CorrectionModelFactory
from color_correction.processor.det_yv8 import DetectionProcessor
-from color_correction.schemas.images import ColorPatchType, ImageType
+from color_correction.schemas.custom_types import (
+ ColorPatchType,
+ ImageBGR,
+ LiteralModelCorrection,
+ LiteralModelDetection,
+)
from color_correction.utils.image_patch import (
create_patch_tiled_image,
visualize_patch_comparison,
@@ -26,17 +26,18 @@
class ColorCorrection:
- """Color correction handler using color card detection and correction models.
-
+ """Color correction handler using color `card_detection` and `correction_models`.
This class handles the complete workflow of color correction, including:
+
- Color card detection in images
- Color patch extraction
- Color correction model training
- Image correction application
+ - Evaluation of color correction patches
Parameters
----------
- detection_model : {'yolov8'}
+ detection_model : LiteralModelDetection, optional
The model to use for color card detection.
detection_conf_th : float, optional
Confidence threshold for card detection.
@@ -58,11 +59,11 @@ class ColorCorrection:
Attributes
----------
- reference_patches : List[ColorPatchType] | None
+ reference_patches : list[ColorPatchType] | None
Extracted color patches from reference image.
- reference_grid_image : ImageType | None
+ reference_grid_image : ImageBGR | None
Visualization of reference color patches in grid format.
- reference_debug_image : ImageType | None
+ reference_debug_image : ImageBGR | None
Debug visualization of reference image preprocessing.
"""
@@ -71,7 +72,7 @@ def __init__(
detection_model: LiteralModelDetection = "yolov8",
detection_conf_th: float = 0.25,
correction_model: LiteralModelCorrection = "least_squares",
- reference_image: ImageType | None = None,
+ reference_image: ImageBGR | None = None,
use_gpu: bool = True,
**kwargs: dict,
) -> None:
@@ -137,14 +138,14 @@ def _create_detector(
def _extract_color_patches(
self,
- image: ImageType,
+ image: ImageBGR,
debug: bool = False,
- ) -> tuple[list[ColorPatchType], ImageType, ImageType | None]:
+ ) -> tuple[list[ColorPatchType], ImageBGR, ImageBGR | None]:
"""Extract color patches from an image using card detection.
Parameters
----------
- image : ImageType
+ image : ImageBGR
Input image in BGR format.
debug : bool, optional
Whether to generate debug visualizations.
@@ -168,15 +169,15 @@ def _extract_color_patches(
def _save_debug_output(
self,
- input_image: ImageType,
- corrected_image: ImageType,
+ input_image: ImageBGR,
+ corrected_image: ImageBGR,
output_directory: str,
) -> None:
"""Save debug visualizations to disk.
Parameters
----------
- input_image : ImageType
+ input_image : ImageBGR
The input image.
corrected_image : ImageType
The color-corrected image.
@@ -258,11 +259,13 @@ def ref_patches(self) -> np.ndarray:
-------
tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]
A tuple containing:
- - reference_patches: The array representing the reference color patches.
- - reference_grid_image: The array depicting
- the grid layout of the reference patches.
- - reference_debug_image: The array used for debugging
- the color correction process.
+
+ - **self.reference_patches**: The array representing the reference
+ color patches.
+ - **self.reference_grid_image**: The array depicting the grid layout of
+ the reference patches.
+ - **self.reference_debug_image**: The array used for debugging the color
+ correction process.
"""
return (
self.reference_patches,
@@ -292,9 +295,11 @@ def set_reference_patches(
-------
None
Sets the following instance attributes:
- - reference_patches: Color values of reference patches
- - reference_grid_image: Grid image of reference patches
- - reference_debug_image: Debug visualization (only if debug=True and image provided)
+
+ - `self.reference_patches`: Color values of reference patches
+ - `self.reference_grid_image`: Grid image of reference patches
+ - `self.reference_debug_image`: Debug visualization (only if debug=True and image provided)
+
""" # noqa: E501
if image is None:
self.reference_patches = reference_color_d50_bgr
@@ -323,19 +328,21 @@ def set_input_patches(self, image: np.ndarray, debug: bool = False) -> None:
-------
tuple
Contains three elements:
- - input_patches : np.ndarray
+
+ - `self.input_patches` : np.ndarray
Extracted color patches from the image
- - input_grid_image : np.ndarray
+ - `self.input_grid_image` : np.ndarray
Visualization of the detected grid
- - input_debug_image : np.ndarray
+ - `self.input_debug_image` : np.ndarray
Debug visualization (if debug=True)
Notes
-----
The function will set class attributes:
- - `self.input_patches`
- - `self.input_grid_image`
- - `self.input_debug_image`
+
+ - `self.input_patches`
+ - `self.input_grid_image`
+ - `self.input_debug_image`
        The function first resets these attributes to None before processing 🔄
"""
@@ -350,7 +357,7 @@ def set_input_patches(self, image: np.ndarray, debug: bool = False) -> None:
) = self._extract_color_patches(image=image, debug=debug)
return self.input_patches, self.input_grid_image, self.input_debug_image
- def fit(self) -> tuple[NDArray, list[ColorPatchType], list[ColorPatchType]]:
+ def fit(self) -> tuple[np.ndarray, list[ColorPatchType], list[ColorPatchType]]:
"""Fit color correction model using input and reference images.
Parameters
@@ -362,7 +369,7 @@ def fit(self) -> tuple[NDArray, list[ColorPatchType], list[ColorPatchType]]:
Returns
-------
- Tuple[np.ndarray, List[np.ndarray], List[np.ndarray]]
+ tuple[np.ndarray, list[np.ndarray], list[np.ndarray]]
Correction weights, input patches, and reference patches.
"""
if self.reference_patches is None:
@@ -386,15 +393,15 @@ def fit(self) -> tuple[NDArray, list[ColorPatchType], list[ColorPatchType]]:
def predict(
self,
- input_image: ImageType,
+ input_image: ImageBGR,
debug: bool = False,
debug_output_dir: str = "output-debug",
- ) -> ImageType:
+ ) -> ImageBGR:
"""Apply color correction to input image.
Parameters
----------
- input_image : ImageType
+ input_image : ImageBGR
Image to be color corrected.
debug : bool, optional
Whether to save debug visualizations.
@@ -403,7 +410,7 @@ def predict(
Returns
-------
- ImageType
+ ImageBGR
Color corrected image.
Raises
@@ -428,6 +435,37 @@ def predict(
return corrected_image
def calc_color_diff_patches(self) -> dict:
+ """
+ Calculate color difference metrics for image patches using the dE CIE 2000 metric.
+
+ This method computes the color differences between:
+
+ - The initial (uncorrected) input patches and the reference patches.
+ - The corrected patches and the reference patches.
+
+ It then calculates the delta as the difference between the initial and corrected color differences
+ (i.e., initial minus corrected) to assess the change in color discrepancy after correction.
+
+ Notes
+ -----
+ This function processes patches only, not whole images. The calculations compare the color differences
+ between patches before correction and patches after correction against the same reference patches.
+
+ Returns
+ -------
+ dict
+ A dictionary with the following keys:
+
+ - `initial`: dict containing the color difference metrics for the initial patches versus the reference.
+ - `corrected`: dict containing the color difference metrics for the corrected patches versus the reference.
+ - `delta`: dict with metrics representing the difference between the initial and corrected color differences.
+ Each metric is computed as:
+ ```python
+              metric_delta = metric_initial - metric_corrected
+ ```
+ where metrics include `min`, `max`, `mean`, and `std`.
+
+ """ # noqa: E501
initial_color_diff = calc_color_diff(
image1=self.input_grid_image,
image2=self.reference_grid_image,
diff --git a/color_correction/services/correction_analyzer.py b/color_correction/services/correction_analyzer.py
index 3d8487a..aabd21d 100644
--- a/color_correction/services/correction_analyzer.py
+++ b/color_correction/services/correction_analyzer.py
@@ -3,7 +3,7 @@
import numpy as np
import pandas as pd
-from color_correction.constant.methods import (
+from color_correction.schemas.custom_types import (
LiteralModelCorrection,
LiteralModelDetection,
)
@@ -163,7 +163,7 @@ def run(
input_image : np.ndarray
The image to be processed.
output_dir : str, optional
- The directory to save reports, by default "benchmark_debug".
+ The directory to save reports, by default `benchmark_debug`.
reference_image : np.ndarray, optional
Optional reference image used for evaluation, by default None.
@@ -172,6 +172,15 @@ def run(
pd.DataFrame
A DataFrame containing results of all experiments.
+ Notes
+ -----
+        The specified output directory (by default "benchmark_debug") will be created if it does not exist.
+        All benchmark results are saved in this folder, which will contain:
+
+ - **An HTML report**: includes a matrix table showing correction methods vs. evaluation delta E (CIE 2000) and preview images.
+ - **A CSV file**: A CSV report of the DataFrame with image data columns removed.
+ - **A PKL file**: A pickle file containing the full DataFrame.
+
Examples
--------
>>> import numpy as np
diff --git a/color_correction/utils/correction.py b/color_correction/utils/correction.py
index 91f6f47..7c5ffe6 100644
--- a/color_correction/utils/correction.py
+++ b/color_correction/utils/correction.py
@@ -2,6 +2,19 @@
def preprocessing_compute(input_image: np.ndarray) -> np.ndarray:
+ """
+ Preprocess the input image for computation by reshaping and converting datatype.
+
+ Parameters
+ ----------
+ input_image : np.ndarray
+ Input image array that can be either a grid (24, 3) or a general image.
+
+ Returns
+ -------
+ np.ndarray
+ Processed image data as a float32 numpy array.
+ """
if input_image.shape == (24, 3):
# to handle grid image patches only
image = input_image.astype(np.float32)
@@ -14,6 +27,21 @@ def postprocessing_compute(
original_shape: tuple,
predict_image: np.ndarray,
) -> np.ndarray:
+ """
+ Convert predicted image data back into its original shape and type.
+
+ Parameters
+ ----------
+ original_shape : tuple
+        The original dimensions of the image: (H, W) for patch grids or (H, W, C) for full images.
+ predict_image : np.ndarray
+ The processed image data to be reshaped and clipped.
+
+ Returns
+ -------
+ np.ndarray
+ The final corrected image, reshaped to original dimensions and in uint8 format.
+ """
if len(original_shape) == 2:
# to handle grid image patches only
corrected_image = np.clip(predict_image, 0, 255).astype(np.uint8)
diff --git a/color_correction/utils/device_info.py b/color_correction/utils/device_info.py
index e9b456c..868f95a 100644
--- a/color_correction/utils/device_info.py
+++ b/color_correction/utils/device_info.py
@@ -10,23 +10,18 @@
def detect_darwin(specs: dict[str, Any]) -> dict[str, Any]:
- """Detect hardware specifications for macOS (Darwin).
+ """
+ Detect hardware specifications on macOS, including CPU and GPU details.
Parameters
----------
specs : dict
- Base specifications dictionary with OS information.
+ Initial dictionary containing OS information.
Returns
-------
dict
- Updated specifications with CPU and GPU information.
-
- Notes
- -----
- Detects:
- 1. Apple Silicon vs Intel via sysctl
- 2. GPU type (Apple/AMD/NVIDIA) via system_profiler
+ Updated dictionary with CPU architecture and GPU type for macOS.
"""
try:
cpu_info = subprocess.check_output(
@@ -58,23 +53,18 @@ def detect_darwin(specs: dict[str, Any]) -> dict[str, Any]:
def detect_linux(specs: dict[str, Any]) -> dict[str, Any]:
- """Detect hardware specifications for Linux.
+ """
+ Detect hardware specifications on Linux systems.
Parameters
----------
specs : dict
- Base specifications dictionary with OS information.
+ Initial dictionary with OS information.
Returns
-------
dict
- Updated specifications with CPU and GPU information.
-
- Notes
- -----
- Detects:
- 1. CPU architecture (ARM/x86_64) via lscpu
- 2. GPU type (NVIDIA/AMD) via nvidia-smi or lspci
+ Updated dictionary with CPU architecture and GPU type for Linux.
"""
try:
cpu_info = subprocess.check_output("lscpu", shell=True).decode().lower()
@@ -110,17 +100,18 @@ def detect_linux(specs: dict[str, Any]) -> dict[str, Any]:
def detect_windows(specs: dict[str, Any]) -> dict[str, Any]:
- """Detect hardware specifications for Windows.
+ """
+ Detect hardware specifications on Windows systems.
Parameters
----------
specs : dict
- Base specifications dictionary with OS information.
+ Initial dictionary with OS information.
Returns
-------
dict
- Updated specifications with CPU and GPU information.
+ Updated dictionary with CPU architecture and GPU type for Windows.
"""
proc = platform.processor().lower()
if "intel" in proc:
@@ -137,12 +128,14 @@ def detect_windows(specs: dict[str, Any]) -> dict[str, Any]:
def get_device_specs() -> DeviceSpecs:
- """Get device hardware specifications.
+ """
+ Retrieve a structured set of device hardware specifications.
Returns
-------
DeviceSpecs
- Pydantic model containing device specifications.
+ An object containing OS name, CPU architecture, GPU type,
+ and Apple Silicon flag.
"""
specs = {
"os_name": platform.system(),
diff --git a/color_correction/utils/downloader.py b/color_correction/utils/downloader.py
index 19770e6..f472d39 100644
--- a/color_correction/utils/downloader.py
+++ b/color_correction/utils/downloader.py
@@ -10,19 +10,18 @@
def download_google_drive_file(file_id: str, output_file: str) -> None:
"""
- Download a file from Google Drive using its file ID.
+ Download a file from Google Drive using a file ID.
Parameters
----------
file_id : str
- The unique ID of the file on Google Drive.
+ Unique identifier of the file on Google Drive.
output_file : str
- The name of the output file where the content will be saved.
+ Local path where the downloaded file will be saved.
Returns
-------
None
- Downloads the file and saves it locally.
"""
url: Final = f"https://drive.google.com/uc?export=download&id={file_id}"
@@ -51,6 +50,19 @@ def download_google_drive_file(file_id: str, output_file: str) -> None:
def downloader_model_yolov8(use_gpu: bool = False) -> str:
+ """
+ Download the appropriate YOLOv8 model based on device specifications.
+
+ Parameters
+ ----------
+ use_gpu : bool, optional
+ Flag indicating whether to use a GPU model; default is False.
+
+ Returns
+ -------
+ str
+ The file path to the downloaded YOLOv8 model.
+ """
specs = get_device_specs()
model_folder = os.path.join(os.getcwd(), "tmp", "models")
if use_gpu:
diff --git a/color_correction/utils/formater.py b/color_correction/utils/formater.py
index 0afce12..ffd71a8 100644
--- a/color_correction/utils/formater.py
+++ b/color_correction/utils/formater.py
@@ -6,7 +6,25 @@
def format_value(value: np.ndarray | dict | list | float | str) -> str:
- """Format different types of values for HTML display"""
+ """
+ Format different types of values for HTML display.
+
+ Parameters
+ ----------
+ value : np.ndarray, dict, list, float, or str
+ The input value that needs to be formatted. If the value is:
+
+ - an `np.ndarray`, it is assumed to represent an image array and will be converted
+ to a base64-encoded HTML image.
+ - a `dict` or `list`, it will be converted to its JSON string representation.
+ - an `np.float64` or `np.float32`, it will be formatted as a float with 4 decimal places.
+ - any other type, it will be converted to a string using the str() function.
+
+ Returns
+ -------
+ str
+ A string representation of the input value formatted for HTML display.
+ """ # noqa: E501
if isinstance(value, np.ndarray): # Image arrays
return f''
elif isinstance(value, dict | list): # Dictionaries or lists
diff --git a/color_correction/utils/geometry_processing.py b/color_correction/utils/geometry_processing.py
index 812dd92..770a320 100644
--- a/color_correction/utils/geometry_processing.py
+++ b/color_correction/utils/geometry_processing.py
@@ -8,6 +8,24 @@ def get_max_iou_shapely(
ref_box: shapely.geometry.box,
target_boxes: list[shapely.geometry.box],
) -> tuple[float, int, shapely.geometry.box]:
+ """
+ Find the target box with the highest IoU compared to a reference box.
+
+ Parameters
+ ----------
+ ref_box : shapely.geometry.box
+ The reference bounding box.
+ target_boxes : list of shapely.geometry.box
+ List of candidate boxes.
+
+ Returns
+ -------
+ tuple
+ A tuple containing:
+ - Maximum IoU (float)
+ - Index of the box with the maximum IoU (int)
+ - The corresponding target box (shapely.geometry.box)
+ """
max_iou = 0
max_idx = -1
@@ -29,16 +47,54 @@ def get_max_iou_shapely(
def box_to_xyxy(box: shapely.geometry.box) -> tuple[int, int, int, int]:
- """Convert shapely box to xyxy format"""
+ """
+ Convert a Shapely box to (x1, y1, x2, y2) format.
+
+ Parameters
+ ----------
+ box : shapely.geometry.box
+ Input Shapely box.
+
+ Returns
+ -------
+ tuple[int, int, int, int]
+ Coordinates in (x1, y1, x2, y2) format.
+ """
minx, miny, maxx, maxy = box.bounds
return int(minx), int(miny), int(maxx), int(maxy)
def box_centroid_xy(box: shapely.geometry.box) -> tuple[int, int]:
+ """
+ Get the centroid coordinates of a Shapely box.
+
+ Parameters
+ ----------
+ box : shapely.geometry.box
+ Input Shapely box.
+
+ Returns
+ -------
+ tuple[int, int]
+ Coordinates of the centroid (x, y).
+ """
return int(box.centroid.x), int(box.centroid.y)
def generate_expected_patches(card_box: box_tuple) -> list[box_tuple]:
+ """
+ Generate a grid of expected patch coordinates within a card box.
+
+ Parameters
+ ----------
+ card_box : tuple[int, int, int, int]
+ Coordinates of the card in (x1, y1, x2, y2) format.
+
+ Returns
+ -------
+ list[box_tuple]
+ List of patch coordinates arranged in a grid.
+ """
card_x1, card_y1, card_x2, card_y2 = card_box
card_width = card_x2 - card_x1
card_height = card_y2 - card_y1
@@ -63,6 +119,21 @@ def extract_intersecting_patches(
ls_patches: list[box_tuple],
ls_grid_card: list[box_tuple],
) -> list[tuple[box_tuple, tuple[int, int]]]:
+ """
+ Extract patches that intersect with each grid card and compute centroids.
+
+ Parameters
+ ----------
+ ls_patches : list[box_tuple]
+ List of detected patch coordinates.
+ ls_grid_card : list[box_tuple]
+ List of grid card coordinates.
+
+ Returns
+ -------
+ list[tuple[box_tuple, tuple[int, int]]]
+ Each element is a tuple of the intersecting patch coordinates and its centroid.
+ """
ls_ordered_patch = []
for _, grid_card in enumerate(ls_grid_card, start=1):
# get intesect patch
@@ -89,6 +160,19 @@ def extract_intersecting_patches(
def calculate_patch_statistics(ls_ordered_patch: list[box_tuple]) -> tuple:
+ """
+ Calculate mean differences in positions and sizes for patches.
+
+ Parameters
+ ----------
+ ls_ordered_patch : list[box_tuple]
+ List of patch coordinates.
+
+ Returns
+ -------
+ tuple
+ A tuple containing mean dx, mean dy, mean width, and mean height.
+ """
ls_dx = []
ls_dy = []
ls_w_grid = []
@@ -126,6 +210,20 @@ def calculate_patch_statistics(ls_ordered_patch: list[box_tuple]) -> tuple:
def suggest_missing_patch_coordinates( # noqa: C901
ls_ordered_patch: list[box_tuple],
) -> dict[int, box_tuple]:
+ """
+ Suggest coordinates for missing patches based on neighboring patches.
+
+ Parameters
+ ----------
+ ls_ordered_patch : list[box_tuple]
+ List of ordered patch coordinates (with None for missing patches).
+
+ Returns
+ -------
+ dict[int, box_tuple]
+ A dictionary where keys are indices of missing patches and values
+ are the suggested coordinates.
+ """
d_suggest = {}
mean_dx, mean_dy, mean_w, mean_h = calculate_patch_statistics(
diff --git a/color_correction/utils/image_processing.py b/color_correction/utils/image_processing.py
index a0cc535..264a8af 100644
--- a/color_correction/utils/image_processing.py
+++ b/color_correction/utils/image_processing.py
@@ -15,21 +15,22 @@ def crop_region_with_margin(
coordinates: tuple[int, int, int, int],
margin_ratio: float = 0.2,
) -> np.ndarray:
- """Crop a region from image with additional margin from given coordinates.
+ """
+ Crop a sub-region from an image with an additional margin.
Parameters
----------
image : np.ndarray
- Input image array of shape (H, W, C) or (H, W).
- coordinates : np.ndarray
- Bounding box coordinates [x1, y1, x2, y2].
+ The input image (H, W, C) or (H, W).
+ coordinates : tuple[int, int, int, int]
+ The bounding box defined as (x1, y1, x2, y2).
margin_ratio : float, optional
- Ratio of margin to add relative to region size, by default 0.2.
+ Ratio to determine the extra margin; default is 0.2.
Returns
-------
np.ndarray
- Cropped image region with margins.
+ The cropped image region including the margin.
"""
y1, y2 = coordinates[1], coordinates[3]
x1, x2 = coordinates[0], coordinates[2]
@@ -48,17 +49,18 @@ def crop_region_with_margin(
def calc_mean_color_patch(img: np.ndarray) -> np.ndarray:
- """Calculate mean RGB/BGR values across spatial dimensions.
+ """
+ Compute the mean color of an image patch across spatial dimensions.
Parameters
----------
img : np.ndarray
- Input image array of shape (H, W, C).
+ The input image patch with shape (H, W, C).
Returns
-------
np.ndarray
- Array of mean RGB values, shape (C,), dtype uint8.
+ Array containing the mean color for each channel (dtype uint8).
"""
return np.mean(img, axis=(0, 1)).astype(np.uint8)
@@ -67,18 +69,20 @@ def calc_color_diff(
image1: ImageType,
image2: ImageType,
) -> dict[str, float]:
- """Calculate color difference metrics between two images.
+ """
+ Calculate color difference metrics between two images using CIE 2000.
Parameters
----------
- image1, image2 : NDArray
- Images to compare in BGR format.
+ image1 : ImageType
+ First input image in BGR format.
+ image2 : ImageType
+ Second input image in BGR format.
Returns
-------
dict[str, float]
- Dictionary of color difference
- keys: min, max, mean, std
+ Dictionary with keys 'min', 'max', 'mean', and 'std' for the color difference.
"""
rgb1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
rgb2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
@@ -100,7 +104,21 @@ def numpy_array_to_base64(
arr: np.ndarray,
convert_bgr_to_rgb: bool = True,
) -> str:
- """Convert numpy array (image) to base64 string"""
+ """
+ Convert a numpy image array into a base64-encoded PNG string.
+
+ Parameters
+ ----------
+ arr : np.ndarray
+ Input image array.
+ convert_bgr_to_rgb : bool, optional
+ Whether to convert BGR to RGB before encoding; default is True.
+
+ Returns
+ -------
+ str
+ Base64-encoded image string prefixed with the appropriate data URI.
+ """
if arr is None:
return ""
diff --git a/color_correction/utils/report_generator.py b/color_correction/utils/report_generator.py
index 2969965..66bb4f9 100644
--- a/color_correction/utils/report_generator.py
+++ b/color_correction/utils/report_generator.py
@@ -12,12 +12,36 @@ def __init__(self) -> None:
self.template_dir = "color_correction.templates"
def _read_template(self, filename: str) -> str:
- """Read template file content"""
+ """
+ Read and return the content of a template file.
+
+ Parameters
+ ----------
+ filename : str
+ Name of the template file to load.
+
+ Returns
+ -------
+ str
+ Content of the template file.
+ """
with resources.files(self.template_dir).joinpath(filename).open("r") as f:
return f.read()
def generate_report(self, body_report: str) -> str:
- """Generate full HTML report"""
+ """
+ Generate a complete HTML report by combining components.
+
+ Parameters
+ ----------
+ body_report : str
+ The main HTML string representing the report body.
+
+ Returns
+ -------
+ str
+ The final HTML report as a string.
+ """
# Load components
styles = self._read_template("style-report.css")
scripts = self._read_template("script-report.js")
@@ -37,7 +61,21 @@ def generate_report(self, body_report: str) -> str:
return final_html
def generate_table(self, headers: list, rows: list) -> str:
- """Generate table HTML"""
+ """
+ Generate an HTML table from headers and row data.
+
+ Parameters
+ ----------
+ headers : list
+ List of table headers.
+ rows : list
+ List of rows where each row is a string of HTML table cells.
+
+ Returns
+ -------
+ str
+ HTML string representing the complete table.
+ """
headers_html = "".join([f"