diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index c41249c..a6bd985 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -18,10 +18,10 @@ jobs: id-token: write actions: read steps: - - name: Clone repository + - name: πŸ—ƒοΈ Clone repository uses: actions/checkout@v4 - - name: Install uv and set the python version + - name: 🐍 Install uv and set the python version uses: astral-sh/setup-uv@v5 with: version: "0.5.24" @@ -29,19 +29,18 @@ jobs: cache-dependency-glob: "uv.lock" python-version: 3.12 - - - name: Install dependencies + - name: πŸ“¦ Install dependencies run: | uv sync --only-group docs - - name: Build MkDocs site + - name: πŸ—οΈ Build MkDocs site run: | mkdocs build - - name: Setup Pages + - name: πŸ”§ Setup Pages uses: actions/configure-pages@v3 - - name: Upload artifact + - name: πŸ“€ Upload artifact uses: actions/upload-pages-artifact@v3 with: path: ./site @@ -61,6 +60,6 @@ jobs: url: ${{ steps.deployment.outputs.page_url }} steps: - - name: Deploy to GitHub Pages + - name: πŸš€ Deploy to GitHub Pages id: deployment uses: actions/deploy-pages@v4 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 394eac7..aafcf98 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -5,12 +5,16 @@ on: branches: [ main ] paths-ignore: - '**.md' + - '*.md' + - '**.js' + - '**.html' + - '**.css' - 'docs/**' - 'LICENSE' - '.gitignore' - '.pre-commit-config.yaml' - 'mkdocs.yml' - - 'docs_requirements.txt' + - 'CHANGELOG.md' jobs: test: diff --git a/CHANGELOG.md b/CHANGELOG.md index 71c801c..4a6e54f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,10 @@ # Changelog +## [v0.0.1-rc1] - 2025-02-10 +**Release Candidate with Full Feature Set and Documentation** +- Add docs page build with mkdocs-material. 
Read more [https://agfianf.github.io/color-correction/](https://agfianf.github.io/color-correction/) +- Complete docstrings for all classes and methods + ## [v0.0.1b3] - 2025-02-06 **Add Analyzer Report and Bug Fixes** diff --git a/Makefile b/Makefile index 3a6341f..8eccc6f 100644 --- a/Makefile +++ b/Makefile @@ -19,3 +19,12 @@ log: update-uv-lock: uv lock + +list-installed: + uv pip list + +sync-docs: + uv sync --only-group={docs,dev} + +sync-all: + uv sync --all-groups --no-group dev-model diff --git a/color_correction/core/correction/polynomial.py b/color_correction/core/correction/polynomial.py index 522bfca..0e0a997 100644 --- a/color_correction/core/correction/polynomial.py +++ b/color_correction/core/correction/polynomial.py @@ -20,8 +20,9 @@ class Polynomial(BaseComputeCorrection): ---------- **kwargs : dict, optional Keyword arguments. Recognized keyword: - degree : int, optional, default 2 - Degree of the polynomial. + + - `degree` : int, optional, default 2 + Degree of the polynomial. """ def __init__(self, **kwargs: dict) -> None: @@ -60,17 +61,15 @@ def fit( Reference image patches. **kwargs : dict Additional keyword arguments. Recognized keyword: - degree : int, optional - Degree of the polynomial. + + - `degree` : int, optional + Degree of the polynomial. Returns ------- np.ndarray Fitted model pipeline. - Notes - ----- - The execution time for model fitting is printed. """ start_time = time.perf_counter() degree = kwargs.get("degree", self.degree) diff --git a/color_correction/schemas/__init__.py b/color_correction/schemas/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/color_correction/schemas/custom_types.py b/color_correction/schemas/custom_types.py new file mode 100644 index 0000000..5b52219 --- /dev/null +++ b/color_correction/schemas/custom_types.py @@ -0,0 +1,70 @@ +""" +Images Schema Module +-------------------- +This module defines type annotations for image processing. 
+ +Attributes +---------- +ColorPatchType : numpy.typing.NDArray[np.uint8] + Represents a color patch extracted from an image. Usually a mean of a region patch. + + Example + ------- + ```python + np.array( + [ + [68, 82, 115], # 1. Dark skin + [128, 149, 195], # 2. Light skin + [157, 123, 93], # 3. Blue sky + [65, 108, 91], # 4. Foliage + [175, 129, 130], # 5. Blue flower + [171, 191, 99], # 6. Bluish green + [46, 123, 220], # 7. Orange + [168, 92, 72], # 8. Purplish blue + [97, 84, 194], # 9. Moderate red + [104, 59, 91], # 10. Purple + [62, 189, 161], # 11. Yellow green + [40, 161, 229], # 12. Orange yellow + [147, 63, 42], # 13. Blue + [72, 149, 72], # 14. Green + [57, 50, 175], # 15. Red + [22, 200, 238], # 16. Yellow + [150, 84, 188], # 17. Magenta + [166, 137, 0], # 18. Cyan + [240, 245, 245], # 19. White 9.5 + [201, 202, 201], # 20. Neutral 8 + [162, 162, 161], # 21. Neutral 6.5 + [121, 121, 120], # 22. Neutral 5 + [85, 85, 83], # 23. Neutral 3.5 + [51, 50, 50], # 24. Black 2 + ], + ) + ``` +ImageType : numpy.typing.NDArray[np.uint8] + Represents an image 3D array with shape (H, W, C) in uint8 format. +ImageBGR : numpy.typing.NDArray[np.uint8] + Represents an image in BGR format (OpenCV default). +ImageRGB : numpy.typing.NDArray[np.uint8] + Represents an image in RGB format. + +""" + +from typing import Literal + +import numpy as np +from numpy.typing import NDArray + +LiteralModelCorrection = Literal[ + "least_squares", + "polynomial", + "linear_reg", + "affine_reg", +] + +LiteralModelDetection = Literal["yolov8"] + +ColorPatchType = NDArray[np.uint8] +ImageType = NDArray[np.uint8] +ImageBGR = NDArray[np.uint8] +ImageRGB = NDArray[np.uint8] +ImageGray = NDArray[np.uint8] diff --git a/color_correction/schemas/det_yv8.py b/color_correction/schemas/det_yv8.py index 6d4a3f7..17db913 100644 --- a/color_correction/schemas/det_yv8.py +++ b/color_correction/schemas/det_yv8.py @@ -1,3 +1,10 @@ +""" +Module for detection result schema using Pydantic. 
+ +Provides the DetectionResult model that contains detection boxes, scores, and class ids, +and a helper method to draw these detections on an image. +""" + import numpy as np from pydantic import BaseModel @@ -7,6 +14,38 @@ class DetectionResult(BaseModel): + """ + Detection result model for YOLOv8 card and color patches detection. + + A data model that encapsulates YOLOv8 detection results for a standardized color + card and its color patches. The model handles two distinct classes: + patches (label 0) and card (label 1). In a typical detection scenario, + the model captures one color calibration card and 24 color patches. + + + Notes + ----- + The detection typically yields 25 objects: + + - 1 calibration card (class_id: 1) + - 24 color patches (class_id: 0) + + Attributes + ---------- + boxes : list[tuple[int, int, int, int]] + List of bounding boxes as (x1, y1, x2, y2), + representing the top-left and bottom-right corners of each detection. + scores : list[float] + List of confidence scores for each detection. + class_ids : list[int] + List of class IDs corresponding to each detection. + Class identifiers indicate the object type where: + + - 0: represents color patches + - 1: represents the calibration card + + """ + boxes: list[BoundingBox] scores: list[float] class_ids: list[int] @@ -15,5 +54,17 @@ def draw_detections( self, image: np.ndarray, ) -> np.ndarray: - """Draw detection boxes on image.""" + """ + Draw detection boxes on the provided image. + + Parameters + ---------- + image : numpy.ndarray + The image on which the detection boxes will be drawn. + + Returns + ------- + numpy.ndarray + The image with the drawn detection boxes.
+ """ return draw_detections(image, self.boxes, self.scores, self.class_ids) diff --git a/color_correction/schemas/device.py b/color_correction/schemas/device.py index 2df25a9..0ec9540 100644 --- a/color_correction/schemas/device.py +++ b/color_correction/schemas/device.py @@ -19,7 +19,20 @@ class CPUArchitecture(str, Enum): class DeviceSpecs(BaseModel): - """Device specifications schema.""" + """ + Device specifications schema. + + Attributes + ---------- + os_name : str + Operating system name. + cpu_arch : CPUArchitecture + CPU architecture. + gpu_type : GPUType + GPU type. + is_apple_silicon : bool + Whether the device is Apple Silicon. + """ os_name: str = Field(..., description="Operating system name") cpu_arch: CPUArchitecture = Field( diff --git a/color_correction/schemas/images.py b/color_correction/schemas/images.py deleted file mode 100644 index 040cde5..0000000 --- a/color_correction/schemas/images.py +++ /dev/null @@ -1,5 +0,0 @@ -import numpy as np -from numpy.typing import NDArray - -ColorPatchType = NDArray[np.uint8] -ImageType = NDArray[np.uint8] diff --git a/color_correction/services/color_correction.py b/color_correction/services/color_correction.py index 0f12cb8..fedf51f 100644 --- a/color_correction/services/color_correction.py +++ b/color_correction/services/color_correction.py @@ -2,19 +2,19 @@ import cv2 import numpy as np -from numpy.typing import NDArray from color_correction.constant.color_checker import reference_color_d50_bgr -from color_correction.constant.methods import ( - LiteralModelCorrection, - LiteralModelDetection, -) from color_correction.core.card_detection.det_yv8_onnx import ( YOLOv8CardDetector, ) from color_correction.core.correction import CorrectionModelFactory from color_correction.processor.det_yv8 import DetectionProcessor -from color_correction.schemas.images import ColorPatchType, ImageType +from color_correction.schemas.custom_types import ( + ColorPatchType, + ImageBGR, + LiteralModelCorrection, + 
LiteralModelDetection, +) from color_correction.utils.image_patch import ( create_patch_tiled_image, visualize_patch_comparison, @@ -26,17 +26,18 @@ class ColorCorrection: - """Color correction handler using color card detection and correction models. - + """Color correction handler using color `card_detection` and `correction_models`. This class handles the complete workflow of color correction, including: + - Color card detection in images - Color patch extraction - Color correction model training - Image correction application + - Evaluation of color correction patches Parameters ---------- - detection_model : {'yolov8'} + detection_model : LiteralModelDetection, optional The model to use for color card detection. detection_conf_th : float, optional Confidence threshold for card detection. @@ -58,11 +59,11 @@ class ColorCorrection: Attributes ---------- - reference_patches : List[ColorPatchType] | None + reference_patches : list[ColorPatchType] | None Extracted color patches from reference image. - reference_grid_image : ImageType | None + reference_grid_image : ImageBGR | None Visualization of reference color patches in grid format. - reference_debug_image : ImageType | None + reference_debug_image : ImageBGR | None Debug visualization of reference image preprocessing. """ @@ -71,7 +72,7 @@ def __init__( detection_model: LiteralModelDetection = "yolov8", detection_conf_th: float = 0.25, correction_model: LiteralModelCorrection = "least_squares", - reference_image: ImageType | None = None, + reference_image: ImageBGR | None = None, use_gpu: bool = True, **kwargs: dict, ) -> None: @@ -137,14 +138,14 @@ def _create_detector( def _extract_color_patches( self, - image: ImageType, + image: ImageBGR, debug: bool = False, - ) -> tuple[list[ColorPatchType], ImageType, ImageType | None]: + ) -> tuple[list[ColorPatchType], ImageBGR, ImageBGR | None]: """Extract color patches from an image using card detection. 
Parameters ---------- - image : ImageType + image : ImageBGR Input image in BGR format. debug : bool, optional Whether to generate debug visualizations. @@ -168,15 +169,15 @@ def _extract_color_patches( def _save_debug_output( self, - input_image: ImageType, - corrected_image: ImageType, + input_image: ImageBGR, + corrected_image: ImageBGR, output_directory: str, ) -> None: """Save debug visualizations to disk. Parameters ---------- - input_image : ImageType + input_image : ImageBGR The input image. corrected_image : ImageType The color-corrected image. @@ -258,11 +259,13 @@ def ref_patches(self) -> np.ndarray: ------- tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray] A tuple containing: - - reference_patches: The array representing the reference color patches. - - reference_grid_image: The array depicting - the grid layout of the reference patches. - - reference_debug_image: The array used for debugging - the color correction process. + + - **self.reference_patches**: The array representing the reference + color patches. + - **self.reference_grid_image**: The array depicting the grid layout of + the reference patches. + - **self.reference_debug_image**: The array used for debugging the color + correction process. 
""" return ( self.reference_patches, @@ -292,9 +295,11 @@ def set_reference_patches( ------- None Sets the following instance attributes: - - reference_patches: Color values of reference patches - - reference_grid_image: Grid image of reference patches - - reference_debug_image: Debug visualization (only if debug=True and image provided) + + - `self.reference_patches`: Color values of reference patches + - `self.reference_grid_image`: Grid image of reference patches + - `self.reference_debug_image`: Debug visualization (only if debug=True and image provided) + """ # noqa: E501 if image is None: self.reference_patches = reference_color_d50_bgr @@ -323,19 +328,21 @@ def set_input_patches(self, image: np.ndarray, debug: bool = False) -> None: ------- tuple Contains three elements: - - input_patches : np.ndarray + + - `self.input_patches` : np.ndarray Extracted color patches from the image - - input_grid_image : np.ndarray + - `self.input_grid_image` : np.ndarray Visualization of the detected grid - - input_debug_image : np.ndarray + - `self.input_debug_image` : np.ndarray Debug visualization (if debug=True) Notes ----- The function will set class attributes: - - `self.input_patches` - - `self.input_grid_image` - - `self.input_debug_image` + + - `self.input_patches` + - `self.input_grid_image` + - `self.input_debug_image` The function first resets these attributes to None before processing πŸ”„ """ @@ -350,7 +357,7 @@ def set_input_patches(self, image: np.ndarray, debug: bool = False) -> None: ) = self._extract_color_patches(image=image, debug=debug) return self.input_patches, self.input_grid_image, self.input_debug_image - def fit(self) -> tuple[NDArray, list[ColorPatchType], list[ColorPatchType]]: + def fit(self) -> tuple[np.ndarray, list[ColorPatchType], list[ColorPatchType]]: """Fit color correction model using input and reference images. 
Parameters @@ -362,7 +369,7 @@ def fit(self) -> tuple[NDArray, list[ColorPatchType], list[ColorPatchType]]: Returns ------- - Tuple[np.ndarray, List[np.ndarray], List[np.ndarray]] + tuple[np.ndarray, list[np.ndarray], list[np.ndarray]] Correction weights, input patches, and reference patches. """ if self.reference_patches is None: @@ -386,15 +393,15 @@ def fit(self) -> tuple[NDArray, list[ColorPatchType], list[ColorPatchType]]: def predict( self, - input_image: ImageType, + input_image: ImageBGR, debug: bool = False, debug_output_dir: str = "output-debug", - ) -> ImageType: + ) -> ImageBGR: """Apply color correction to input image. Parameters ---------- - input_image : ImageType + input_image : ImageBGR Image to be color corrected. debug : bool, optional Whether to save debug visualizations. @@ -403,7 +410,7 @@ def predict( Returns ------- - ImageType + ImageBGR Color corrected image. Raises @@ -428,6 +435,37 @@ def predict( return corrected_image def calc_color_diff_patches(self) -> dict: + """ + Calculate color difference metrics for image patches using the dE CIE 2000 metric. + + This method computes the color differences between: + + - The initial (uncorrected) input patches and the reference patches. + - The corrected patches and the reference patches. + + It then calculates the delta as the difference between the initial and corrected color differences + (i.e., initial minus corrected) to assess the change in color discrepancy after correction. + + Notes + ----- + This function processes patches only, not whole images. The calculations compare the color differences + between patches before correction and patches after correction against the same reference patches. + + Returns + ------- + dict + A dictionary with the following keys: + + - `initial`: dict containing the color difference metrics for the initial patches versus the reference. + - `corrected`: dict containing the color difference metrics for the corrected patches versus the reference. 
+ - `delta`: dict with metrics representing the difference between the initial and corrected color differences. + Each metric is computed as: + ```python + metric_delta = metric_initial - metric_corrected + ``` + where metrics include `min`, `max`, `mean`, and `std`. + + """ # noqa: E501 initial_color_diff = calc_color_diff( image1=self.input_grid_image, image2=self.reference_grid_image, diff --git a/color_correction/services/correction_analyzer.py b/color_correction/services/correction_analyzer.py index 3d8487a..aabd21d 100644 --- a/color_correction/services/correction_analyzer.py +++ b/color_correction/services/correction_analyzer.py @@ -3,7 +3,7 @@ import numpy as np import pandas as pd -from color_correction.constant.methods import ( +from color_correction.schemas.custom_types import ( LiteralModelCorrection, LiteralModelDetection, ) @@ -163,7 +163,7 @@ def run( input_image : np.ndarray The image to be processed. output_dir : str, optional - The directory to save reports, by default "benchmark_debug". + The directory to save reports, by default `benchmark_debug`. reference_image : np.ndarray, optional Optional reference image used for evaluation, by default None. @@ -172,6 +172,15 @@ pd.DataFrame A DataFrame containing results of all experiments. + Notes + ----- + The specified output directory (by default `benchmark_debug`) will be created if it does not exist. + All benchmark results are saved in this folder, which will contain: + + - **An HTML report**: includes a matrix table showing correction methods vs. evaluation delta E (CIE 2000) and preview images. + - **A CSV file**: A CSV report of the DataFrame with image data columns removed. + - **A PKL file**: A pickle file containing the full DataFrame.
+ Examples -------- >>> import numpy as np diff --git a/color_correction/utils/correction.py b/color_correction/utils/correction.py index 91f6f47..7c5ffe6 100644 --- a/color_correction/utils/correction.py +++ b/color_correction/utils/correction.py @@ -2,6 +2,19 @@ def preprocessing_compute(input_image: np.ndarray) -> np.ndarray: + """ + Preprocess the input image for computation by reshaping and converting datatype. + + Parameters + ---------- + input_image : np.ndarray + Input image array that can be either a grid (24, 3) or a general image. + + Returns + ------- + np.ndarray + Processed image data as a float32 numpy array. + """ if input_image.shape == (24, 3): # to handle grid image patches only image = input_image.astype(np.float32) @@ -14,6 +27,21 @@ def postprocessing_compute( original_shape: tuple, predict_image: np.ndarray, ) -> np.ndarray: + """ + Convert predicted image data back into its original shape and type. + + Parameters + ---------- + original_shape : tuple + The original dimensions of the image. Should be 2 or (H, W, C). + predict_image : np.ndarray + The processed image data to be reshaped and clipped. + + Returns + ------- + np.ndarray + The final corrected image, reshaped to original dimensions and in uint8 format. + """ if len(original_shape) == 2: # to handle grid image patches only corrected_image = np.clip(predict_image, 0, 255).astype(np.uint8) diff --git a/color_correction/utils/device_info.py b/color_correction/utils/device_info.py index e9b456c..868f95a 100644 --- a/color_correction/utils/device_info.py +++ b/color_correction/utils/device_info.py @@ -10,23 +10,18 @@ def detect_darwin(specs: dict[str, Any]) -> dict[str, Any]: - """Detect hardware specifications for macOS (Darwin). + """ + Detect hardware specifications on macOS, including CPU and GPU details. Parameters ---------- specs : dict - Base specifications dictionary with OS information. + Initial dictionary containing OS information. 
Returns ------- dict - Updated specifications with CPU and GPU information. - - Notes - ----- - Detects: - 1. Apple Silicon vs Intel via sysctl - 2. GPU type (Apple/AMD/NVIDIA) via system_profiler + Updated dictionary with CPU architecture and GPU type for macOS. """ try: cpu_info = subprocess.check_output( @@ -58,23 +53,18 @@ def detect_darwin(specs: dict[str, Any]) -> dict[str, Any]: def detect_linux(specs: dict[str, Any]) -> dict[str, Any]: - """Detect hardware specifications for Linux. + """ + Detect hardware specifications on Linux systems. Parameters ---------- specs : dict - Base specifications dictionary with OS information. + Initial dictionary with OS information. Returns ------- dict - Updated specifications with CPU and GPU information. - - Notes - ----- - Detects: - 1. CPU architecture (ARM/x86_64) via lscpu - 2. GPU type (NVIDIA/AMD) via nvidia-smi or lspci + Updated dictionary with CPU architecture and GPU type for Linux. """ try: cpu_info = subprocess.check_output("lscpu", shell=True).decode().lower() @@ -110,17 +100,18 @@ def detect_linux(specs: dict[str, Any]) -> dict[str, Any]: def detect_windows(specs: dict[str, Any]) -> dict[str, Any]: - """Detect hardware specifications for Windows. + """ + Detect hardware specifications on Windows systems. Parameters ---------- specs : dict - Base specifications dictionary with OS information. + Initial dictionary with OS information. Returns ------- dict - Updated specifications with CPU and GPU information. + Updated dictionary with CPU architecture and GPU type for Windows. """ proc = platform.processor().lower() if "intel" in proc: @@ -137,12 +128,14 @@ def detect_windows(specs: dict[str, Any]) -> dict[str, Any]: def get_device_specs() -> DeviceSpecs: - """Get device hardware specifications. + """ + Retrieve a structured set of device hardware specifications. Returns ------- DeviceSpecs - Pydantic model containing device specifications. 
+ An object containing OS name, CPU architecture, GPU type, + and Apple Silicon flag. """ specs = { "os_name": platform.system(), diff --git a/color_correction/utils/downloader.py b/color_correction/utils/downloader.py index 19770e6..f472d39 100644 --- a/color_correction/utils/downloader.py +++ b/color_correction/utils/downloader.py @@ -10,19 +10,18 @@ def download_google_drive_file(file_id: str, output_file: str) -> None: """ - Download a file from Google Drive using its file ID. + Download a file from Google Drive using a file ID. Parameters ---------- file_id : str - The unique ID of the file on Google Drive. + Unique identifier of the file on Google Drive. output_file : str - The name of the output file where the content will be saved. + Local path where the downloaded file will be saved. Returns ------- None - Downloads the file and saves it locally. """ url: Final = f"https://drive.google.com/uc?export=download&id={file_id}" @@ -51,6 +50,19 @@ def download_google_drive_file(file_id: str, output_file: str) -> None: def downloader_model_yolov8(use_gpu: bool = False) -> str: + """ + Download the appropriate YOLOv8 model based on device specifications. + + Parameters + ---------- + use_gpu : bool, optional + Flag indicating whether to use a GPU model; default is False. + + Returns + ------- + str + The file path to the downloaded YOLOv8 model. + """ specs = get_device_specs() model_folder = os.path.join(os.getcwd(), "tmp", "models") if use_gpu: diff --git a/color_correction/utils/formater.py b/color_correction/utils/formater.py index 0afce12..ffd71a8 100644 --- a/color_correction/utils/formater.py +++ b/color_correction/utils/formater.py @@ -6,7 +6,25 @@ def format_value(value: np.ndarray | dict | list | float | str) -> str: - """Format different types of values for HTML display""" + """ + Format different types of values for HTML display. + + Parameters + ---------- + value : np.ndarray, dict, list, float, or str + The input value that needs to be formatted. 
If the value is: + + - an `np.ndarray`, it is assumed to represent an image array and will be converted + to a base64-encoded HTML image. + - a `dict` or `list`, it will be converted to its JSON string representation. + - an `np.float64` or `np.float32`, it will be formatted as a float with 4 decimal places. + - any other type, it will be converted to a string using the str() function. + + Returns + ------- + str + A string representation of the input value formatted for HTML display. + """ # noqa: E501 if isinstance(value, np.ndarray): # Image arrays return f'' elif isinstance(value, dict | list): # Dictionaries or lists diff --git a/color_correction/utils/geometry_processing.py b/color_correction/utils/geometry_processing.py index 812dd92..770a320 100644 --- a/color_correction/utils/geometry_processing.py +++ b/color_correction/utils/geometry_processing.py @@ -8,6 +8,24 @@ def get_max_iou_shapely( ref_box: shapely.geometry.box, target_boxes: list[shapely.geometry.box], ) -> tuple[float, int, shapely.geometry.box]: + """ + Find the target box with the highest IoU compared to a reference box. + + Parameters + ---------- + ref_box : shapely.geometry.box + The reference bounding box. + target_boxes : list of shapely.geometry.box + List of candidate boxes. + + Returns + ------- + tuple + A tuple containing: + - Maximum IoU (float) + - Index of the box with the maximum IoU (int) + - The corresponding target box (shapely.geometry.box) + """ max_iou = 0 max_idx = -1 @@ -29,16 +47,54 @@ def get_max_iou_shapely( def box_to_xyxy(box: shapely.geometry.box) -> tuple[int, int, int, int]: - """Convert shapely box to xyxy format""" + """ + Convert a Shapely box to (x1, y1, x2, y2) format. + + Parameters + ---------- + box : shapely.geometry.box + Input Shapely box. + + Returns + ------- + tuple[int, int, int, int] + Coordinates in (x1, y1, x2, y2) format. 
+ """ minx, miny, maxx, maxy = box.bounds return int(minx), int(miny), int(maxx), int(maxy) def box_centroid_xy(box: shapely.geometry.box) -> tuple[int, int]: + """ + Get the centroid coordinates of a Shapely box. + + Parameters + ---------- + box : shapely.geometry.box + Input Shapely box. + + Returns + ------- + tuple[int, int] + Coordinates of the centroid (x, y). + """ return int(box.centroid.x), int(box.centroid.y) def generate_expected_patches(card_box: box_tuple) -> list[box_tuple]: + """ + Generate a grid of expected patch coordinates within a card box. + + Parameters + ---------- + card_box : tuple[int, int, int, int] + Coordinates of the card in (x1, y1, x2, y2) format. + + Returns + ------- + list[box_tuple] + List of patch coordinates arranged in a grid. + """ card_x1, card_y1, card_x2, card_y2 = card_box card_width = card_x2 - card_x1 card_height = card_y2 - card_y1 @@ -63,6 +119,21 @@ def extract_intersecting_patches( ls_patches: list[box_tuple], ls_grid_card: list[box_tuple], ) -> list[tuple[box_tuple, tuple[int, int]]]: + """ + Extract patches that intersect with each grid card and compute centroids. + + Parameters + ---------- + ls_patches : list[box_tuple] + List of detected patch coordinates. + ls_grid_card : list[box_tuple] + List of grid card coordinates. + + Returns + ------- + list[tuple[box_tuple, tuple[int, int]]] + Each element is a tuple of the intersecting patch coordinates and its centroid. + """ ls_ordered_patch = [] for _, grid_card in enumerate(ls_grid_card, start=1): # get intesect patch @@ -89,6 +160,19 @@ def extract_intersecting_patches( def calculate_patch_statistics(ls_ordered_patch: list[box_tuple]) -> tuple: + """ + Calculate mean differences in positions and sizes for patches. + + Parameters + ---------- + ls_ordered_patch : list[box_tuple] + List of patch coordinates. + + Returns + ------- + tuple + A tuple containing mean dx, mean dy, mean width, and mean height. 
+ """ ls_dx = [] ls_dy = [] ls_w_grid = [] @@ -126,6 +210,20 @@ def calculate_patch_statistics(ls_ordered_patch: list[box_tuple]) -> tuple: def suggest_missing_patch_coordinates( # noqa: C901 ls_ordered_patch: list[box_tuple], ) -> dict[int, box_tuple]: + """ + Suggest coordinates for missing patches based on neighboring patches. + + Parameters + ---------- + ls_ordered_patch : list[box_tuple] + List of ordered patch coordinates (with None for missing patches). + + Returns + ------- + dict[int, box_tuple] + A dictionary where keys are indices of missing patches and values + are the suggested coordinates. + """ d_suggest = {} mean_dx, mean_dy, mean_w, mean_h = calculate_patch_statistics( diff --git a/color_correction/utils/image_processing.py b/color_correction/utils/image_processing.py index a0cc535..264a8af 100644 --- a/color_correction/utils/image_processing.py +++ b/color_correction/utils/image_processing.py @@ -15,21 +15,22 @@ def crop_region_with_margin( coordinates: tuple[int, int, int, int], margin_ratio: float = 0.2, ) -> np.ndarray: - """Crop a region from image with additional margin from given coordinates. + """ + Crop a sub-region from an image with an additional margin. Parameters ---------- image : np.ndarray - Input image array of shape (H, W, C) or (H, W). - coordinates : np.ndarray - Bounding box coordinates [x1, y1, x2, y2]. + The input image (H, W, C) or (H, W). + coordinates : tuple[int, int, int, int] + The bounding box defined as (x1, y1, x2, y2). margin_ratio : float, optional - Ratio of margin to add relative to region size, by default 0.2. + Ratio to determine the extra margin; default is 0.2. Returns ------- np.ndarray - Cropped image region with margins. + The cropped image region including the margin. 
""" y1, y2 = coordinates[1], coordinates[3] x1, x2 = coordinates[0], coordinates[2] @@ -48,17 +49,18 @@ def crop_region_with_margin( def calc_mean_color_patch(img: np.ndarray) -> np.ndarray: - """Calculate mean RGB/BGR values across spatial dimensions. + """ + Compute the mean color of an image patch across spatial dimensions. Parameters ---------- img : np.ndarray - Input image array of shape (H, W, C). + The input image patch with shape (H, W, C). Returns ------- np.ndarray - Array of mean RGB values, shape (C,), dtype uint8. + Array containing the mean color for each channel (dtype uint8). """ return np.mean(img, axis=(0, 1)).astype(np.uint8) @@ -67,18 +69,20 @@ def calc_color_diff( image1: ImageType, image2: ImageType, ) -> dict[str, float]: - """Calculate color difference metrics between two images. + """ + Calculate color difference metrics between two images using CIE 2000. Parameters ---------- - image1, image2 : NDArray - Images to compare in BGR format. + image1 : ImageType + First input image in BGR format. + image2 : ImageType + Second input image in BGR format. Returns ------- dict[str, float] - Dictionary of color difference - keys: min, max, mean, std + Dictionary with keys 'min', 'max', 'mean', and 'std' for the color difference. """ rgb1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB) rgb2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB) @@ -100,7 +104,21 @@ def numpy_array_to_base64( arr: np.ndarray, convert_bgr_to_rgb: bool = True, ) -> str: - """Convert numpy array (image) to base64 string""" + """ + Convert a numpy image array into a base64-encoded PNG string. + + Parameters + ---------- + arr : np.ndarray + Input image array. + convert_bgr_to_rgb : bool, optional + Whether to convert BGR to RGB before encoding; default is True. + + Returns + ------- + str + Base64-encoded image string prefixed with the appropriate data URI. 
+ """ if arr is None: return "" diff --git a/color_correction/utils/report_generator.py b/color_correction/utils/report_generator.py index 2969965..66bb4f9 100644 --- a/color_correction/utils/report_generator.py +++ b/color_correction/utils/report_generator.py @@ -12,12 +12,36 @@ def __init__(self) -> None: self.template_dir = "color_correction.templates" def _read_template(self, filename: str) -> str: - """Read template file content""" + """ + Read and return the content of a template file. + + Parameters + ---------- + filename : str + Name of the template file to load. + + Returns + ------- + str + Content of the template file. + """ with resources.files(self.template_dir).joinpath(filename).open("r") as f: return f.read() def generate_report(self, body_report: str) -> str: - """Generate full HTML report""" + """ + Generate a complete HTML report by combining components. + + Parameters + ---------- + body_report : str + The main HTML string representing the report body. + + Returns + ------- + str + The final HTML report as a string. + """ # Load components styles = self._read_template("style-report.css") scripts = self._read_template("script-report.js") @@ -37,7 +61,21 @@ def generate_report(self, body_report: str) -> str: return final_html def generate_table(self, headers: list, rows: list) -> str: - """Generate table HTML""" + """ + Generate an HTML table from headers and row data. + + Parameters + ---------- + headers : list + List of table headers. + rows : list + List of rows where each row is a string of HTML table cells. + + Returns + ------- + str + HTML string representing the complete table. 
+ """ headers_html = "".join([f"{h}" for h in headers]) rows_html = "".join([f"{r}" for r in rows]) @@ -53,7 +91,21 @@ def generate_table(self, headers: list, rows: list) -> str: """ def generate_html_report(self, df: pd.DataFrame, path_html: str) -> str: - """Generate HTML report from DataFrame with images""" + """ + Generate and save an HTML report from a pandas DataFrame. + + Parameters + ---------- + df : pd.DataFrame + DataFrame containing the report data. + path_html : str + Output file path to save the HTML report. + + Returns + ------- + str + The generated HTML report as a string. + """ df_html = df.copy() # Generate rows @@ -78,5 +130,14 @@ def generate_html_report(self, df: pd.DataFrame, path_html: str) -> str: return report_html def save_dataframe(self, df: pd.DataFrame, filepath: str) -> None: - """Save DataFrame with pickle to preserve numpy arrays""" + """ + Save a DataFrame to a file using pickle format. + + Parameters + ---------- + df : pd.DataFrame + The DataFrame to be saved. + filepath : str + Output file path. + """ df.to_pickle(filepath) diff --git a/color_correction/utils/visualization_utils.py b/color_correction/utils/visualization_utils.py index 2f81ccf..edd1294 100644 --- a/color_correction/utils/visualization_utils.py +++ b/color_correction/utils/visualization_utils.py @@ -12,25 +12,26 @@ def create_image_grid_visualization( dpi: int = 300, ) -> matplotlib.figure.Figure: """ - Display images in a grid layout with titles + Create a grid visualization of images with optional saving. 
- Parameters: - ----------- - images : List[Tuple[str, Union[np.ndarray, matplotlib.figure.Figure, None]]] - List of tuples containing (title, image) - grid_size : Tuple[int, int] - Grid layout in (rows, columns) format - figsize : Tuple[int, int] - Size of the entire figure in inches - save_path : Optional[str] - If provided, save the figure to this path - dpi : int - DPI for saved figure + Parameters + ---------- + images : list of tuple + List where each tuple contains (title, image) and image can be a numpy array, + a matplotlib Figure, or None. + grid_size : tuple[int, int], optional + Tuple of (rows, columns) defining the grid layout; default is (2, 3). + figsize : tuple[int, int], optional + Size of the matplotlib figure in inches; default is (15, 10). + save_path : str | None, optional + File path to save the figure; if None, the figure is not saved. + dpi : int, optional + Dots per inch for the saved image; default is 300. - Returns: - -------- + Returns + ------- matplotlib.figure.Figure - The figure object containing the grid + The created matplotlib figure containing the image grid. """ rows, cols = grid_size diff --git a/color_correction/utils/yolo_utils.py b/color_correction/utils/yolo_utils.py index 96925ff..560819e 100644 --- a/color_correction/utils/yolo_utils.py +++ b/color_correction/utils/yolo_utils.py @@ -24,34 +24,22 @@ # Detection Processing Functions def nms(boxes: np.ndarray, scores: np.ndarray, iou_threshold: float) -> list[int]: - """Apply Non-Maximum Suppression (NMS) to filter overlapping bounding boxes. - - NMS is used to eliminate redundant bounding boxes in object detection tasks. - It selects the bounding boxes with the highest confidence scores while - removing boxes that overlap significantly with them. - - Logic: - - No Overlap (IoU = 0): - Boxes are retained as they do not affect each other. 
- - High Overlap (IoU > threshold): - The box with the lower confidence score is removed, as it is considered - a duplicate detection of the same object. - - Low Overlap (IoU < threshold): - Both boxes are retained, as they are considered detections of different objects. + """ + Apply Non-Maximum Suppression (NMS) to filter overlapping bounding boxes. Parameters ---------- boxes : np.ndarray - Array of bounding boxes in format (x1, y1, x2, y2). + Array of bounding boxes with shape (N, 4) in (x1, y1, x2, y2) format. scores : np.ndarray - Array of confidence scores for each box. + Confidence scores for each bounding box. iou_threshold : float - IoU threshold for filtering overlapping boxes. + Threshold for Intersection over Union (IoU) to filter boxes. Returns ------- list[int] - Indices of the bounding boxes to keep after applying NMS. + List of indices for boxes to keep. """ # Sort by score sorted_indices = np.argsort(scores)[::-1] @@ -84,23 +72,24 @@ def multiclass_nms( class_ids: np.ndarray, iou_threshold: float, ) -> list[int]: - """Apply non-maximum suppression to boxes across multiple classes. + """ + Perform Non-Maximum Suppression (NMS) on boxes across multiple classes. Parameters ---------- boxes : np.ndarray - Array of bounding boxes in format (x1, y1, x2, y2). + Array of bounding boxes in (x1, y1, x2, y2) format. scores : np.ndarray - Array of confidence scores for each box. + Confidence scores corresponding to each box. class_ids : np.ndarray - Array of class IDs for each box. + Class identifier for each bounding box. iou_threshold : float - IoU threshold for filtering overlapping boxes. + IoU threshold to determine overlapping boxes. Returns ------- list[int] - Indices of boxes to keep after multiclass NMS. + List of indices for boxes to keep. 
""" unique_class_ids = np.unique(class_ids) @@ -116,19 +105,20 @@ def multiclass_nms( def compute_iou(box: np.ndarray, boxes: np.ndarray) -> np.ndarray: - """Compute Intersection over Union between a box and an array of boxes. + """ + Compute the Intersection over Union (IoU) between a box and an array of boxes. Parameters ---------- box : np.ndarray - Single box in format (x1, y1, x2, y2). + Single bounding box in (x1, y1, x2, y2) format. boxes : np.ndarray - Array of boxes to compute IoU with. + Array of boxes to compare against. Returns ------- np.ndarray - Array of IoU values between the input box and each box in boxes. + Array containing the IoU of the input box with each box in 'boxes'. """ box = box.astype(np.float32) boxes = boxes.astype(np.float32) @@ -153,17 +143,18 @@ def compute_iou(box: np.ndarray, boxes: np.ndarray) -> np.ndarray: def xywh2xyxy(x: np.ndarray) -> np.ndarray: - """Convert bounding box format from (x, y, w, h) to (x1, y1, x2, y2). + """ + Convert bounding boxes from (x, y, w, h) to (x1, y1, x2, y2) format. Parameters ---------- x : np.ndarray - Array of boxes in (x, y, w, h) format. + Array of bounding boxes in (x, y, w, h) format. Returns ------- np.ndarray - Array of boxes in (x1, y1, x2, y2) format. + Array of bounding boxes in (x1, y1, x2, y2) format. """ # Convert bounding box (x, y, w, h) to bounding box (x1, y1, x2, y2) y = np.copy(x) @@ -182,25 +173,26 @@ def draw_detections( class_ids: list[int], mask_alpha: float = DEFAULT_MASK_ALPHA, ) -> np.ndarray: - """Draw detection boxes, labels and masks on the image. + """ + Draw bounding boxes, labels, and semi-transparent masks on an image. Parameters ---------- image : np.ndarray - Input image to draw on. + The input image on which detections will be drawn. boxes : list[list[int]] - List of bounding boxes in format (x1, y1, x2, y2). + List of bounding boxes represented as (x1, y1, x2, y2). scores : list[float] - List of confidence scores for each detection. 
+ Confidence scores for every box. class_ids : list[int] - List of class IDs for each detection. + Class IDs corresponding to each detection. mask_alpha : float, optional - Transparency of the mask overlay, by default DEFAULT_MASK_ALPHA. + Transparency of the drawn mask, default is DEFAULT_MASK_ALPHA. Returns ------- np.ndarray - Image with drawn detections. + The annotated image with drawn detections. """ det_img = image.copy() img_height, img_width = image.shape[:2] @@ -230,23 +222,24 @@ def draw_box( color: tuple[int, int, int] = DEFAULT_COLOR, thickness: int = DEFAULT_THICKNESS, ) -> np.ndarray: - """Draw a bounding box on the image. + """ + Draw a single bounding box on an image. Parameters ---------- image : np.ndarray - Input image to draw on. + The original image. box : list[int] - Bounding box coordinates in format (x1, y1, x2, y2). + Bounding box coordinates in (x1, y1, x2, y2) format. color : tuple[int, int, int], optional - RGB color for the box, by default DEFAULT_COLOR. + Color of the box in RGB format; default is DEFAULT_COLOR. thickness : int, optional - Line thickness of the box, by default DEFAULT_THICKNESS. + Line thickness; default is DEFAULT_THICKNESS. Returns ------- np.ndarray - Image with drawn box. + The image with the box drawn on it. """ x1, y1, x2, y2 = box return cv2.rectangle(image, (x1, y1), (x2, y2), color, thickness) @@ -260,27 +253,28 @@ def draw_text( font_size: float = 0.001, text_thickness: int = 2, ) -> np.ndarray: - """Draw text with background on the image. + """ + Draw text with a background rectangle near a bounding box. Parameters ---------- image : np.ndarray - Input image to draw on. + The image on which text is drawn. text : str - Text to be drawn. + Text string to be displayed. box : list[int] - Bounding box coordinates where text will be placed. + Bounding box coordinates (x1, y1, x2, y2) where the text will be placed. color : tuple[int, int, int], optional - RGB color for text background, by default DEFAULT_COLOR. 
+ Background color for the text; default is DEFAULT_COLOR. font_size : float, optional - Size of the font, by default 0.001. + Font scaling factor; default is 0.001. text_thickness : int, optional - Thickness of the text, by default 2. + Thickness of the text stroke; default is 2. Returns ------- np.ndarray - Image with drawn text. + Image with the annotated text. """ x1, y1, x2, y2 = box (tw, th), _ = cv2.getTextSize( @@ -311,23 +305,24 @@ def draw_masks( classes: list[int], mask_alpha: float = DEFAULT_MASK_ALPHA, ) -> np.ndarray: - """Draw semi-transparent masks for detection boxes. + """ + Overlay semi-transparent masks on the image for detected objects. Parameters ---------- image : np.ndarray - Input image to draw on. + The original image. boxes : list[list[int]] - List of bounding boxes in format (x1, y1, x2, y2). + List of bounding boxes in (x1, y1, x2, y2) format. classes : list[int] - List of class IDs for each box. + Class IDs corresponding to each bounding box. mask_alpha : float, optional - Transparency of the mask overlay, by default DEFAULT_MASK_ALPHA. + Alpha value for mask transparency; default is DEFAULT_MASK_ALPHA. Returns ------- np.ndarray - Image with drawn masks. + The image with masks applied. 
""" mask_img = image.copy() diff --git a/docs/index.md b/docs/index.md index 942da0b..d43a4ee 100644 --- a/docs/index.md +++ b/docs/index.md @@ -35,59 +35,63 @@ pip install color-correction ## ⚑ How to use -```python -from color_correction import ColorCorrection - -# Step 1: Define the path to the input image -image_path = "asset/images/cc-19.png" - -# Step 2: Load the input image -input_image = cv2.imread(image_path) - -# Step 3: Initialize the color correction model with specified parameters -color_corrector = ColorCorrection( - detection_model="yolov8", - detection_conf_th=0.25, - correction_model="polynomial", # "least_squares", "affine_reg", "linear_reg" - degree=3, # for polynomial correction model - use_gpu=True, -) - -# Step 4: Extract color patches from the input image -# you can set reference patches from another image (image has color checker card) -# or use the default D50 -# color_corrector.set_reference_patches(image=None, debug=True) -color_corrector.set_input_patches(image=input_image, debug=True) -color_corrector.fit() -corrected_image = color_corrector.predict( - input_image=input_image, - debug=True, - debug_output_dir="zzz", -) - -# Step 5: Evaluate the color correction results -eval_result = color_corrector.calc_color_diff_patches() -print(eval_result) -``` - +=== "code" + + ```python + from color_correction import ColorCorrection + + # Step 1: Define the path to the input image + image_path = "asset/images/cc-19.png" + + # Step 2: Load the input image + input_image = cv2.imread(image_path) + + # Step 3: Initialize the color correction model with specified parameters + color_corrector = ColorCorrection( + detection_model="yolov8", + detection_conf_th=0.25, + correction_model="polynomial", # "least_squares", "affine_reg", "linear_reg" + degree=3, # for polynomial correction model + use_gpu=True, + ) + + # Step 4: Extract color patches from the input image + # you can set reference patches from another image (image has color checker card) + # or 
use the default D50 + # color_corrector.set_reference_patches(image=None, debug=True) + color_corrector.set_input_patches(image=input_image, debug=True) + color_corrector.fit() + corrected_image = color_corrector.predict( + input_image=input_image, + debug=True, + debug_output_dir="zzz", + ) + + # Step 5: Evaluate the color correction results + eval_result = color_corrector.calc_color_diff_patches() + print(eval_result) + ``` -??? info "Sample Evaluation Output" +=== "output evaluation" ```json { "initial": { + // dE values before correction (input patches vs reference) "min": 2.254003059526461, "max": 13.461066402633447, "mean": 8.3072755187654, "std": 3.123962754767539, }, "corrected": { + // dE values after correction (input patches vs reference) "min": 0.30910031798755183, "max": 5.422311999126372, "mean": 1.4965478752947827, "std": 1.2915738724958112, }, "delta": { + // dE values difference (initial vs corrected) "min": 1.9449027415389093, "max": 8.038754403507074, "mean": 6.810727643470616, @@ -97,43 +101,46 @@ print(eval_result) ``` -??? info "Sample Output Debugging Image" +=== "debugging image" ![Sample Output](assets/sample-output-debug.jpg) ## πŸ”Ž Reporting -```python -import cv2 - -from color_correction import ColorCorrectionAnalyzer - -# input_image_path = "assets/cc-19.png" -input_image_path = "assets/cc-1.jpg" - -report = ColorCorrectionAnalyzer( - list_correction_methods=[ - ("least_squares", {}), - ("linear_reg", {}), - ("affine_reg", {}), - ("polynomial", {"degree": 2}), - ("polynomial", {"degree": 3}), - # ("polynomial", {"degree": 4}), - # ("polynomial", {"degree": 5}), - ], - list_detection_methods=[ - ("yolov8", {"detection_conf_th": 0.25}), - ], -) -report.run( - input_image=cv2.imread(input_image_path), - reference_image=None, - output_dir="report-output", -) -``` -??? 
info "Sample Report Output" +=== "code" + + ```python + import cv2 + + from color_correction import ColorCorrectionAnalyzer + + # input_image_path = "assets/cc-19.png" + input_image_path = "assets/cc-1.jpg" + + report = ColorCorrectionAnalyzer( + list_correction_methods=[ + ("least_squares", {}), + ("linear_reg", {}), + ("affine_reg", {}), + ("polynomial", {"degree": 2}), + ("polynomial", {"degree": 3}), + # ("polynomial", {"degree": 4}), + # ("polynomial", {"degree": 5}), + ], + list_detection_methods=[ + ("yolov8", {"detection_conf_th": 0.25}), + ], + ) + report.run( + input_image=cv2.imread(input_image_path), + reference_image=None, + output_dir="report-output", + ) + ``` + +=== "report output" ![Sample Benchmark Output](assets/sample-benchmark.png) diff --git a/docs/reference/core/card_detection/yv8_onnx.md b/docs/reference/core/card_detection/yv8_onnx.md index fe826f7..d813f95 100644 --- a/docs/reference/core/card_detection/yv8_onnx.md +++ b/docs/reference/core/card_detection/yv8_onnx.md @@ -1,4 +1,4 @@ -# Documentation for `YOLOv8CardDetector` +# `YOLOv8CardDetector` class ::: color_correction.core.card_detection.det_yv8_onnx.YOLOv8CardDetector diff --git a/docs/reference/core/correction/affine_reg.md b/docs/reference/core/correction/affine_reg.md index bb9c7db..da9fa3c 100644 --- a/docs/reference/core/correction/affine_reg.md +++ b/docs/reference/core/correction/affine_reg.md @@ -1,4 +1,4 @@ -# Documentation for `AffineRegression` +# `AffineRegression` class ::: color_correction.core.correction.AffineRegression diff --git a/docs/reference/core/correction/least_squares.md b/docs/reference/core/correction/least_squares.md index b674c7f..8266abf 100644 --- a/docs/reference/core/correction/least_squares.md +++ b/docs/reference/core/correction/least_squares.md @@ -1,3 +1,3 @@ -# Documentation for `LeastSquares` +# `LeastSquares` class ::: color_correction.core.correction.LeastSquaresRegression diff --git a/docs/reference/core/correction/linear_reg.md 
b/docs/reference/core/correction/linear_reg.md index 8a5ec6f..09fc55e 100644 --- a/docs/reference/core/correction/linear_reg.md +++ b/docs/reference/core/correction/linear_reg.md @@ -1,3 +1,3 @@ -# Documentation for `LinearRegression` +# `LinearRegression` class ::: color_correction.core.correction.LinearRegression diff --git a/docs/reference/core/correction/polynomial.md b/docs/reference/core/correction/polynomial.md index 7dde95d..51ea926 100644 --- a/docs/reference/core/correction/polynomial.md +++ b/docs/reference/core/correction/polynomial.md @@ -1,3 +1,3 @@ -# Documentation for `Polynomial` +# `Polynomial` class ::: color_correction.core.correction.Polynomial diff --git a/docs/reference/schemas/custom_types.md b/docs/reference/schemas/custom_types.md new file mode 100644 index 0000000..32fc333 --- /dev/null +++ b/docs/reference/schemas/custom_types.md @@ -0,0 +1,3 @@ +# `schemas/images` CustomTypes + +::: color_correction.schemas.custom_types diff --git a/docs/reference/schemas/yv8_onnx.md b/docs/reference/schemas/yv8_onnx.md new file mode 100644 index 0000000..6a84c51 --- /dev/null +++ b/docs/reference/schemas/yv8_onnx.md @@ -0,0 +1,3 @@ +# `DetectionResult` class + +::: color_correction.schemas.det_yv8.DetectionResult diff --git a/docs/reference/services/color_correction.md b/docs/reference/services/color_correction.md index e733a10..a9e8609 100644 --- a/docs/reference/services/color_correction.md +++ b/docs/reference/services/color_correction.md @@ -1,3 +1,3 @@ -# Documentation for `ColorCorrection` +# `ColorCorrection` class ::: color_correction.ColorCorrection diff --git a/docs/reference/services/correction_analyzer.md b/docs/reference/services/correction_analyzer.md index f108d3a..cdb0691 100644 --- a/docs/reference/services/correction_analyzer.md +++ b/docs/reference/services/correction_analyzer.md @@ -1,3 +1,3 @@ -# Documentation for `ColorCorrectionAnalyzer` +# `ColorCorrectionAnalyzer` class ::: color_correction.ColorCorrectionAnalyzer diff --git 
a/docs/reference/utils/correction.md b/docs/reference/utils/correction.md new file mode 100644 index 0000000..622c4bd --- /dev/null +++ b/docs/reference/utils/correction.md @@ -0,0 +1,3 @@ +# `utils/correction` module + +::: color_correction.utils.correction diff --git a/docs/reference/utils/device_info.md b/docs/reference/utils/device_info.md new file mode 100644 index 0000000..14a008d --- /dev/null +++ b/docs/reference/utils/device_info.md @@ -0,0 +1,3 @@ +# `utils/device_info` module + +::: color_correction.utils.device_info diff --git a/docs/reference/utils/downloader.md b/docs/reference/utils/downloader.md new file mode 100644 index 0000000..e1b3f60 --- /dev/null +++ b/docs/reference/utils/downloader.md @@ -0,0 +1,3 @@ +# `utils/downloader` module + +::: color_correction.utils.downloader diff --git a/docs/reference/utils/formater.md b/docs/reference/utils/formater.md new file mode 100644 index 0000000..8760079 --- /dev/null +++ b/docs/reference/utils/formater.md @@ -0,0 +1,3 @@ +# `utils/formater` module + +::: color_correction.utils.formater diff --git a/docs/reference/utils/geometry_proccesing.md b/docs/reference/utils/geometry_proccesing.md new file mode 100644 index 0000000..216f3b8 --- /dev/null +++ b/docs/reference/utils/geometry_proccesing.md @@ -0,0 +1,3 @@ +# `utils/geometry_processing` module + +::: color_correction.utils.geometry_processing diff --git a/docs/reference/utils/image_patch.md b/docs/reference/utils/image_patch.md new file mode 100644 index 0000000..cc86541 --- /dev/null +++ b/docs/reference/utils/image_patch.md @@ -0,0 +1,3 @@ +# `utils/image_patch` module + +::: color_correction.utils.image_patch diff --git a/docs/reference/utils/image_processing.md b/docs/reference/utils/image_processing.md new file mode 100644 index 0000000..be34ebb --- /dev/null +++ b/docs/reference/utils/image_processing.md @@ -0,0 +1,3 @@ +# `utils/image_processing` module + +::: color_correction.utils.image_processing diff --git
a/docs/reference/utils/report_generator.md b/docs/reference/utils/report_generator.md new file mode 100644 index 0000000..303916a --- /dev/null +++ b/docs/reference/utils/report_generator.md @@ -0,0 +1,3 @@ +# `utils/report_generator` module + +::: color_correction.utils.report_generator diff --git a/docs/reference/utils/visualization_utils.md b/docs/reference/utils/visualization_utils.md new file mode 100644 index 0000000..9d85c76 --- /dev/null +++ b/docs/reference/utils/visualization_utils.md @@ -0,0 +1,3 @@ +# `utils/visualization_utils` module + +::: color_correction.utils.visualization_utils diff --git a/docs/reference/utils/yolo_utils.md b/docs/reference/utils/yolo_utils.md new file mode 100644 index 0000000..be3c1a1 --- /dev/null +++ b/docs/reference/utils/yolo_utils.md @@ -0,0 +1,3 @@ +# `utils/yolo_utils` module + +::: color_correction.utils.yolo_utils diff --git a/mkdocs.yml b/mkdocs.yml index 07dbd25..3f0f63f 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -55,23 +55,31 @@ theme: - search.suggest - content.code.copy - content.code.annotate + - content.action.view + markdown_extensions: + - admonition + - codehilite + - pymdownx.superfences - pymdownx.inlinehilite - pymdownx.snippets - - pymdownx.superfences - attr_list - md_in_html - pymdownx.blocks.caption - admonition - pymdownx.details - - pymdownx.superfences - pymdownx.critic - pymdownx.caret - pymdownx.keys - pymdownx.mark - pymdownx.tilde - def_list + - pymdownx.tabbed: + alternate_style: true + slugify: !!python/object/apply:pymdownx.slugs.slugify + kwds: + case: lower - pymdownx.tasklist: custom_checkbox: true - pymdownx.highlight: @@ -87,7 +95,7 @@ markdown_extensions: class: mermaid format: !!python/name:pymdownx.superfences.fence_code_format - toc: - permalink: "Β€" + permalink: "ΒΆ" plugins: - search @@ -148,14 +156,28 @@ nav: - Home: index.md - Tutorial: tutorials.md - Reference: - - Core: - - Card Detection: + - core: + - card_detection: - YoloV8 ONNX: reference/core/card_detection/yv8_onnx.md 
- - Correction: + - correction: - Least Squares: reference/core/correction/least_squares.md - Linear Regression: reference/core/correction/linear_reg.md - Affine Regression: reference/core/correction/affine_reg.md - Polynomial: reference/core/correction/polynomial.md - - Services: - - Color Correction: reference/services/color_correction.md - - Correction Analyzer: reference/services/correction_analyzer.md + - services: + - color_correction: reference/services/color_correction.md + - correction_analyzer: reference/services/correction_analyzer.md + - schemas: + - YoloV8 ONNX: reference/schemas/yv8_onnx.md + - Custom Types: reference/schemas/custom_types.md + - utils: + - correction: reference/utils/correction.md + - device_info: reference/utils/device_info.md + - downloader: reference/utils/downloader.md + - formater: reference/utils/formater.md + - geometry_proccesing: reference/utils/geometry_proccesing.md + - image_patch: reference/utils/image_patch.md + - image_processing: reference/utils/image_processing.md + - report_generator: reference/utils/report_generator.md + - visualization_utils: reference/utils/visualization_utils.md + - yolo_utils: reference/utils/yolo_utils.md