diff --git a/.gitignore b/.gitignore
index 5b2915f..fb6d85b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,3 +21,6 @@ wheels/
tmp/
zzz/
color_correction/asset/images/
+playground/
+assets/cc-1.jpg
+assets/cc-19.png
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5c67668..71c801c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,41 @@
# Changelog
-## [v0.0.1b1] - 2025-02-03
+## [v0.0.1b3] - 2025-02-06
+**Add Analyzer Report and Bug Fixes**
+
+### 🚀 Features
+- Added comprehensive reporting functionality for color correction results
+ - New `ColorCorrectionAnalyzer` class for benchmarking different correction methods
+ - HTML report generation with interactive sorting and PDF export
+ - Visual comparison of before/after color patches
+ - Detailed ΔE metrics for patches and full images
+- Enhanced image processing utilities
+ - Added base64 image conversion support
+ - Improved color difference calculation with rounded metrics
+- Added HTML report generation templates and styling
+ - Responsive design with Plus Jakarta Sans font
+ - Interactive table sorting
+ - PDF export functionality
+ - Detailed column descriptions
+
+### 📝 Documentation
+- Added new Analyzer section in README
+ - Example usage code for ColorCorrectionAnalyzer
+ - Sample benchmark output visualization
+- Updated version to 0.0.1b3
+
+### 🔧 Technical Changes
+- Renamed benchmark class to report for better clarity
+- Added new utility modules:
+ - formater.py for value formatting
+ - report_generator.py for HTML generation
+ - Added new constants and method definitions
+
+## [v0.0.1b2] - 2025-02-05
+Fix naming from `color-correction-asdfghjkl` to `color-correction`
+
+
+## [v0.0.1b1] - 2025-02-04
**Enhanced Color Correction with Improved Documentation and Evaluation**
### ✨ Features
diff --git a/README.md b/README.md
index ad072de..e900991 100644
--- a/README.md
+++ b/README.md
@@ -97,6 +97,41 @@ print(eval_result)
+## 🔎 Reporting
+```python
+import cv2
+
+from color_correction import ColorCorrectionAnalyzer
+
+# input_image_path = "assets/cc-19.png"
+input_image_path = "assets/cc-1.jpg"
+
+report = ColorCorrectionAnalyzer(
+ list_correction_methods=[
+ ("least_squares", {}),
+ ("linear_reg", {}),
+ ("affine_reg", {}),
+ ("polynomial", {"degree": 2}),
+ ("polynomial", {"degree": 3}),
+ # ("polynomial", {"degree": 4}),
+ # ("polynomial", {"degree": 5}),
+ ],
+ list_detection_methods=[
+ ("yolov8", {"detection_conf_th": 0.25}),
+ ],
+)
+report.run(
+ input_image=cv2.imread(input_image_path),
+ reference_image=None,
+ output_dir="report-output",
+)
+```
+
+Sample Report Output
+
+
+
+
## 📈 Benefits
- **Consistency**: Ensure uniform color correction across multiple images.
- **Accuracy**: Leverage the color correction matrix for precise color adjustments.
diff --git a/assets/sample-benchmark.png b/assets/sample-benchmark.png
new file mode 100644
index 0000000..f827e71
Binary files /dev/null and b/assets/sample-benchmark.png differ
diff --git a/color_correction/__init__.py b/color_correction/__init__.py
index f74d97b..89e5312 100644
--- a/color_correction/__init__.py
+++ b/color_correction/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "0.0.1a1"
+__version__ = "0.0.1b3"
# fmt: off
from .constant.color_checker import reference_color_d50_bgr as REFERENCE_COLOR_D50_BGR # noqa: N812, I001
@@ -6,7 +6,7 @@
from .core.card_detection.det_yv8_onnx import YOLOv8CardDetector
from .schemas.det_yv8 import DetectionResult as YOLOv8DetectionResult
from .services.color_correction import ColorCorrection
-
+from .services.correction_analyzer import ColorCorrectionAnalyzer
# fmt: on
__all__ = [
@@ -14,6 +14,7 @@
"REFERENCE_COLOR_D50_BGR",
"REFERENCE_COLOR_D50_RGB",
"ColorCorrection",
+ "ColorCorrectionAnalyzer",
"YOLOv8CardDetector",
"YOLOv8DetectionResult",
]
diff --git a/color_correction/constant/__init__.py b/color_correction/constant/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/color_correction/constant/methods.py b/color_correction/constant/methods.py
new file mode 100644
index 0000000..166d7f8
--- /dev/null
+++ b/color_correction/constant/methods.py
@@ -0,0 +1,10 @@
+from typing import Literal
+
+LiteralModelCorrection = Literal[
+ "least_squares",
+ "polynomial",
+ "linear_reg",
+ "affine_reg",
+]
+
+LiteralModelDetection = Literal["yolov8"]
diff --git a/color_correction/schemas/images.py b/color_correction/schemas/images.py
new file mode 100644
index 0000000..040cde5
--- /dev/null
+++ b/color_correction/schemas/images.py
@@ -0,0 +1,5 @@
+import numpy as np
+from numpy.typing import NDArray
+
+ColorPatchType = NDArray[np.uint8]
+ImageType = NDArray[np.uint8]
diff --git a/color_correction/services/color_correction.py b/color_correction/services/color_correction.py
index f88898a..665cac2 100644
--- a/color_correction/services/color_correction.py
+++ b/color_correction/services/color_correction.py
@@ -1,16 +1,20 @@
import os
-from typing import Literal
import cv2
import numpy as np
from numpy.typing import NDArray
from color_correction.constant.color_checker import reference_color_d50_bgr
+from color_correction.constant.methods import (
+ LiteralModelCorrection,
+ LiteralModelDetection,
+)
from color_correction.core.card_detection.det_yv8_onnx import (
YOLOv8CardDetector,
)
from color_correction.core.correction import CorrectionModelFactory
from color_correction.processor.det_yv8 import DetectionProcessor
+from color_correction.schemas.images import ColorPatchType, ImageType
from color_correction.utils.image_patch import (
create_patch_tiled_image,
visualize_patch_comparison,
@@ -20,15 +24,6 @@
create_image_grid_visualization,
)
-ColorPatchType = NDArray[np.uint8]
-ImageType = NDArray[np.uint8]
-LiteralModelCorrection = Literal[
- "least_squares",
- "polynomial",
- "linear_reg",
- "affine_reg",
-]
-
class ColorCorrection:
"""Color correction handler using color card detection and correction models.
@@ -51,7 +46,7 @@ class ColorCorrection:
Reference image containing color checker card.
If None, uses standard D50 values.
use_gpu : bool, default=True
- Whether to use GPU for card detection.
+ Whether to use GPU for card detection. False will use CPU.
**kwargs : dict
Additional parameters for the correction model.
@@ -67,7 +62,7 @@ class ColorCorrection:
def __init__(
self,
- detection_model: Literal["yolov8"] = "yolov8",
+ detection_model: LiteralModelDetection = "yolov8",
detection_conf_th: float = 0.25,
correction_model: LiteralModelCorrection = "least_squares",
reference_image: ImageType | None = None,
diff --git a/color_correction/services/correction_analyzer.py b/color_correction/services/correction_analyzer.py
new file mode 100644
index 0000000..96f0a61
--- /dev/null
+++ b/color_correction/services/correction_analyzer.py
@@ -0,0 +1,172 @@
+import os
+
+import numpy as np
+import pandas as pd
+
+from color_correction.constant.methods import (
+ LiteralModelCorrection,
+ LiteralModelDetection,
+)
+from color_correction.services.color_correction import ColorCorrection
+from color_correction.utils.image_patch import (
+ visualize_patch_comparison,
+)
+from color_correction.utils.image_processing import calc_color_diff
+from color_correction.utils.report_generator import ReportGenerator
+
+
+class ColorCorrectionAnalyzer:
+ def __init__(
+ self,
+ list_correction_methods: list[tuple[LiteralModelCorrection, dict]],
+ list_detection_methods: list[tuple[LiteralModelDetection, dict]],
+ use_gpu: bool = True,
+ ) -> None:
+ self.list_correction_methods = list_correction_methods
+ self.list_detection_methods = list_detection_methods
+ self.use_gpu = use_gpu
+ self.rg = ReportGenerator()
+
+ def _run_single_exp(
+ self,
+ idx: int,
+ input_image: np.ndarray,
+ det_method: LiteralModelDetection,
+ det_params: dict,
+ cc_method: LiteralModelCorrection,
+ cc_params: dict,
+ reference_image: np.ndarray | None = None,
+ ) -> dict:
+ cc = ColorCorrection(
+ correction_model=cc_method,
+ detection_model=det_method,
+ detection_conf_th=det_params.get("detection_conf_th", 0.25),
+ use_gpu=self.use_gpu,
+ **cc_params,
+ )
+
+ if reference_image is not None:
+ cc.set_reference_image(reference_image)
+ cc.set_input_patches(input_image, debug=True)
+ cc.fit()
+ corrected_image = cc.predict(input_image=input_image)
+ eval_results = cc.calc_color_diff_patches()
+
+ before_comparison = visualize_patch_comparison(
+ ls_mean_in=cc.input_patches,
+ ls_mean_ref=cc.reference_patches,
+ )
+ after_comparison = visualize_patch_comparison(
+ ls_mean_in=cc.corrected_patches,
+ ls_mean_ref=cc.reference_patches,
+ )
+
+ dE_image = calc_color_diff( # noqa: N806
+ image1=input_image,
+ image2=corrected_image,
+ )
+
+ one_row = {
+ "Index": idx,
+ "Detection Method": det_method,
+ "Detection Parameters": det_params,
+ "Drawed Preprocessing Input": cc.input_debug_image,
+ "Drawed Preprocessing Reference": cc.reference_debug_image,
+ "Correction Method": cc_method,
+ "Correction Parameters": cc_params,
+ "Color Patches - Before": before_comparison,
+ "Color Patches - After": after_comparison,
+ "Input Image": input_image,
+ "Corrected Image": corrected_image,
+ "Patch ΔE (Before) - Min": eval_results["initial"]["min"],
+ "Patch ΔE (Before) - Max": eval_results["initial"]["max"],
+ "Patch ΔE (Before) - Mean": eval_results["initial"]["mean"],
+ "Patch ΔE (Before) - Std": eval_results["initial"]["std"],
+ "Patch ΔE (After) - Min": eval_results["corrected"]["min"],
+ "Patch ΔE (After) - Max": eval_results["corrected"]["max"],
+ "Patch ΔE (After) - Mean": eval_results["corrected"]["mean"],
+ "Patch ΔE (After) - Std": eval_results["corrected"]["std"],
+ "Image ΔE - Min": dE_image["min"],
+ "Image ΔE - Max": dE_image["max"],
+ "Image ΔE - Mean": dE_image["mean"],
+ "Image ΔE - Std": dE_image["std"],
+ }
+ return one_row
+
+ def run(
+ self,
+ input_image: np.ndarray,
+ output_dir: str = "benchmark_debug",
+ reference_image: np.ndarray | None = None,
+ ) -> pd.DataFrame:
+ """
+        Run the color-correction benchmark across all configured
+        detection and correction method combinations.
+ ls_data = []
+ idx = 1
+ for det_method, det_params in self.list_detection_methods:
+ for cc_method, cc_params in self.list_correction_methods:
+ print(
+ f"Running benchmark for {cc_method} method with {cc_params}",
+ )
+ data = self._run_single_exp(
+ idx=idx,
+ input_image=input_image,
+ det_method=det_method,
+ det_params=det_params,
+ cc_method=cc_method,
+ cc_params=cc_params,
+ reference_image=reference_image,
+ )
+ idx += 1
+ ls_data.append(data)
+ df_results = pd.DataFrame(ls_data)
+
+ # Generate HTML report path
+ os.makedirs(output_dir, exist_ok=True)
+ html_report_path = os.path.join(output_dir, "report.html")
+ pickel_report_path = os.path.join(output_dir, "report.pkl")
+
+ # Report Generator -----------------------------------------------------
+ self.rg.generate_html_report(df=df_results, path_html=html_report_path)
+ self.rg.save_dataframe(df=df_results, filepath=pickel_report_path)
+
+ # Save CSV report, but without image data
+ df_results.drop(
+ columns=[
+ "Drawed Preprocessing Input",
+ "Drawed Preprocessing Reference",
+ "Color Patches - Before",
+ "Color Patches - After",
+ "Corrected Image",
+ "Input Image",
+ ],
+ ).to_csv(os.path.join(output_dir, "report_no_image.csv"), index=False)
+
+ print("DataFrame shape:", df_results.shape)
+ print("\nDataFrame columns:", df_results.columns.tolist())
+
+
+if __name__ == "__main__":
+    # Make sure this path points to your local test image
+ input_image_path = "asset/images/cc-19.png"
+
+ benchmark = ColorCorrectionAnalyzer(
+ list_correction_methods=[
+ ("least_squares", {}),
+ ("linear_reg", {}),
+ ("affine_reg", {}),
+ ("polynomial", {"degree": 2}),
+ ("polynomial", {"degree": 3}),
+ ("polynomial", {"degree": 4}),
+ ],
+ list_detection_methods=[
+ ("yolov8", {"detection_conf_th": 0.25}),
+ ],
+ )
+
+ benchmark.run(
+ input_image_path,
+ reference_image=None,
+ output_dir="benchmark_debug",
+ )
diff --git a/color_correction/templates/base_report.html b/color_correction/templates/base_report.html
new file mode 100644
index 0000000..1a556fd
--- /dev/null
+++ b/color_correction/templates/base_report.html
@@ -0,0 +1,28 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {body_report}
+
+ {column_descriptions}
+
+
+
+
diff --git a/color_correction/templates/column_description.html b/color_correction/templates/column_description.html
new file mode 100644
index 0000000..28a48da
--- /dev/null
+++ b/color_correction/templates/column_description.html
@@ -0,0 +1,46 @@
+
+
Column Descriptions
+
+
+
Detection & Correction Process
+
+ - Index: Row number identifier
+ - Detection Method: Method used for color checker card and patches detection
+ - Detection Parameters: Parameters used in the detection process
+ - Correction Method: Method used for color correction
+ - Correction Parameters: Parameters used in the correction process
+
+
+
+
Images
+
+                    - Drawed Preprocessing Input/Reference: Draw visualization of preprocessing step (detection card, expected patches, etc.)
+ - Color Patches - Before/After: Color patches comparison before and after correction. Outer is reference and inside is input before/after correction
+ - Input/Corrected Image: Original and color-corrected images
+
+
+
+
Color Difference Metrics (ΔE CIE 2000)
+
+ - Patch ΔE (Before): Color difference statistics between reference and input patches before correction
+
+ - • 0-1: Not perceptible difference
+ - • 1-2: Perceptible through close observation
+ - • 2-10: Perceptible at a glance
+ - • 10+: Clear color difference
+
+
+ - Patch ΔE (After): Color difference statistics between reference and corrected patches
+ - Image ΔE: Overall image color difference statistics
+ - Statistics include:
+
+ - • Minimum: Smallest color difference
+ - • Maximum: Largest color difference
+ - • Mean: Average color difference
+ - • Standard Deviation: Variation in differences
+
+
+
+
+
+
diff --git a/color_correction/templates/script-report.js b/color_correction/templates/script-report.js
new file mode 100644
index 0000000..d035f62
--- /dev/null
+++ b/color_correction/templates/script-report.js
@@ -0,0 +1,118 @@
+document.addEventListener('DOMContentLoaded', function() {
+ const getCellValue = (tr, idx) => {
+        // Make sure tr and children[idx] exist
+ if (!tr || !tr.children || !tr.children[idx]) {
+ return '';
+ }
+
+ const cell = tr.children[idx];
+
+        // Skip if the cell is empty
+ if (!cell) {
+ return '';
+ }
+
+ // Check if cell contains an image
+ if (cell.querySelector('img')) {
+ return ''; // Skip sorting for images
+ }
+
+ // Get text content, trim whitespace
+ const content = cell.textContent.trim();
+
+ // If empty content, return empty string
+ if (!content) {
+ return '';
+ }
+
+ // Try parse as number first
+ const num = Number(content);
+ if (!isNaN(num)) {
+ return num;
+ }
+
+ // Try parse as JSON
+ try {
+ return JSON.parse(content);
+ } catch {
+ return content;
+ }
+ };
+
+ const comparer = (idx) => (a, b) => {
+ const v1 = getCellValue(a, idx);
+ const v2 = getCellValue(b, idx);
+
+ // Handle empty values
+ if (v1 === '' && v2 === '') return 0;
+ if (v1 === '') return 1;
+ if (v2 === '') return -1;
+
+ // Handle different types
+ if (typeof v1 === 'number' && typeof v2 === 'number') {
+ return v1 - v2;
+ }
+
+ if (typeof v1 === 'object' && typeof v2 === 'object') {
+ return JSON.stringify(v1).localeCompare(JSON.stringify(v2));
+ }
+
+ return v1.toString().localeCompare(v2.toString());
+ };
+
+ document.querySelectorAll('table thead th').forEach((th, index) => {
+ let asc = true; // Initial sort direction
+
+ th.addEventListener('click', function() {
+ const tbody = this.closest('table').querySelector('tbody');
+ if (!tbody) return;
+
+ const rows = Array.from(tbody.querySelectorAll('tr'));
+ if (!rows.length) return;
+
+ // Sort rows
+ rows.sort((a, b) => {
+ const result = comparer(index)(a, b);
+ return asc ? result : -result;
+ });
+
+ // Clear existing rows
+ while (tbody.firstChild) {
+ tbody.removeChild(tbody.firstChild);
+ }
+
+ // Add sorted rows
+ tbody.append(...rows);
+
+ // Update sort direction indicators
+ th.closest('tr').querySelectorAll('th').forEach(header => {
+ header.classList.remove('sort-asc', 'sort-desc');
+ });
+ th.classList.add(asc ? 'sort-asc' : 'sort-desc');
+
+ // Toggle direction for next click
+ asc = !asc;
+ });
+ });
+});
+
+function exportToPDF() {
+ const element = document.querySelector('.report-container');
+ const opt = {
+ margin: 1,
+ filename: 'color-correction-report.pdf',
+ image: { type: 'jpeg', quality: 0.98 },
+ html2canvas: { scale: 2 },
+ jsPDF: { unit: 'in', format: 'a4', orientation: 'landscape' }
+ };
+
+ const btn = document.querySelector('.export-btn');
+ btn.textContent = 'Generating PDF...';
+ btn.disabled = true;
+
+ html2pdf().set(opt).from(element).save()
+ .then(() => {
+ btn.textContent = 'Export to PDF';
+ btn.disabled = false;
+ });
+}
diff --git a/color_correction/templates/style-report.css b/color_correction/templates/style-report.css
new file mode 100644
index 0000000..18f89f9
--- /dev/null
+++ b/color_correction/templates/style-report.css
@@ -0,0 +1,197 @@
+/* Report stylesheet: layout, table, sorting indicators, and export button */
+* {
+ margin: 0;
+ padding: 0;
+ box-sizing: border-box;
+ font-family: 'Plus Jakarta Sans', sans-serif;
+}
+
+body {
+ padding: 2rem;
+ background-color: #f5f5f5;
+}
+
+.report-container {
+ background: white;
+ border-radius: 12px;
+ box-shadow: 0 2px 8px rgba(0,0,0,0.1);
+ padding: 2rem;
+ margin: 0 auto;
+ max-width: 1400px;
+}
+
+.report-header {
+ margin-bottom: 2rem;
+}
+
+h1 {
+ color: #1a1a1a;
+ font-size: 1.8rem;
+ margin-bottom: 0.5rem;
+}
+
+.timestamp {
+ color: #666;
+ font-size: 0.9rem;
+ margin-bottom: 2rem;
+}
+
+.table-container {
+ overflow: auto;
+ max-height: 80vh;
+ position: relative;
+}
+
+table {
+ border-collapse: separate;
+ border-spacing: 0;
+ width: 100%;
+ background: white;
+}
+
+thead {
+ position: sticky;
+ top: 0;
+ z-index: 10;
+}
+
+th {
+ background: #f8f9fa;
+ padding: 1rem;
+ text-align: left;
+ color: #444;
+ font-weight: 600;
+ border-bottom: 2px solid #dee2e6;
+ cursor: pointer;
+ transition: background-color 0.2s;
+ white-space: nowrap;
+}
+
+th:hover {
+ background-color: #e9ecef;
+ cursor: pointer;
+}
+
+th.sort-asc::after {
+ content: " ↑";
+ color: #0066cc;
+}
+
+th.sort-desc::after {
+ content: " ↓";
+ color: #0066cc;
+}
+
+td {
+ padding: 1rem;
+ border-bottom: 1px solid #dee2e6;
+ vertical-align: top;
+}
+
+tr:hover {
+ background-color: #f8f9fa;
+}
+
+img {
+ max-width: 300px;
+ border-radius: 6px;
+ box-shadow: 0 2px 4px rgba(0,0,0,0.1);
+}
+
+pre {
+ white-space: pre-wrap;
+ word-wrap: break-word;
+ background: #f8f9fa;
+ padding: 0.5rem;
+ border-radius: 4px;
+ font-size: 0.9rem;
+ color: #444;
+}
+
+@media (max-width: 768px) {
+ body {
+ padding: 1rem;
+ }
+
+ .report-container {
+ padding: 1rem;
+ }
+
+ th, td {
+ padding: 0.75rem;
+ }
+}
+
+.column-descriptions {
+ margin-top: 3rem;
+ padding-top: 2rem;
+ border-top: 1px solid #dee2e6;
+}
+
+.column-descriptions h3 {
+ font-size: 1.4rem;
+ margin-bottom: 1.5rem;
+ color: #1a1a1a;
+}
+
+.description-grid {
+ display: grid;
+ grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
+ gap: 2rem;
+}
+
+.description-item {
+ background: #f8f9fa;
+ padding: 1.5rem;
+ border-radius: 8px;
+}
+
+.description-item h4 {
+ color: #0066cc;
+ margin-bottom: 1rem;
+ font-size: 1.1rem;
+}
+
+.description-item ul {
+ list-style-type: none;
+ padding: 0;
+}
+
+.description-item li {
+ margin-bottom: 0.5rem;
+ font-size: 0.9rem;
+ line-height: 1.4;
+}
+
+.description-item li strong {
+ color: #444;
+}
+
+.export-btn {
+ position: fixed;
+ bottom: 2rem;
+ right: 2rem;
+ background: #0066cc;
+ color: white;
+ border: none;
+ padding: 0.8rem 1.5rem;
+ border-radius: 8px;
+ cursor: pointer;
+ font-family: 'Plus Jakarta Sans', sans-serif;
+ font-weight: 600;
+ box-shadow: 0 2px 8px rgba(0,0,0,0.1);
+ transition: all 0.2s;
+ z-index: 100;
+}
+
+.export-btn:hover {
+ background: #0052a3;
+ transform: translateY(-2px);
+ box-shadow: 0 4px 12px rgba(0,0,0,0.15);
+}
+
+@media print {
+ .export-btn {
+ display: none;
+ }
+}
diff --git a/color_correction/utils/formater.py b/color_correction/utils/formater.py
new file mode 100644
index 0000000..0afce12
--- /dev/null
+++ b/color_correction/utils/formater.py
@@ -0,0 +1,16 @@
+import json
+
+import numpy as np
+
+from color_correction.utils.image_processing import numpy_array_to_base64
+
+
+def format_value(value: np.ndarray | dict | list | float | str) -> str:
+ """Format different types of values for HTML display"""
+ if isinstance(value, np.ndarray): # Image arrays
+ return f'
'
+ elif isinstance(value, dict | list): # Dictionaries or lists
+ return json.dumps(value)
+ elif isinstance(value, np.float64 | np.float32): # Numpy float types
+ return f"{float(value):.4f}"
+ return str(value)
diff --git a/color_correction/utils/image_processing.py b/color_correction/utils/image_processing.py
index c41292b..a0cc535 100644
--- a/color_correction/utils/image_processing.py
+++ b/color_correction/utils/image_processing.py
@@ -1,7 +1,11 @@
+import base64
+import io
+
import colour as cl
import cv2
import numpy as np
from numpy.typing import NDArray
+from PIL import Image
ImageType = NDArray[np.uint8]
@@ -85,8 +89,25 @@ def calc_color_diff(
delta_e = cl.difference.delta_E(lab1, lab2, method="CIE 2000")
return {
- "min": float(np.min(delta_e)),
- "max": float(np.max(delta_e)),
- "mean": float(np.mean(delta_e)),
- "std": float(np.std(delta_e)),
+ "min": round(float(np.min(delta_e)), 4),
+ "max": round(float(np.max(delta_e)), 4),
+ "mean": round(float(np.mean(delta_e)), 4),
+ "std": round(float(np.std(delta_e)), 4),
}
+
+
+def numpy_array_to_base64(
+ arr: np.ndarray,
+ convert_bgr_to_rgb: bool = True,
+) -> str:
+ """Convert numpy array (image) to base64 string"""
+ if arr is None:
+ return ""
+
+ if convert_bgr_to_rgb:
+ arr = cv2.cvtColor(arr, cv2.COLOR_BGR2RGB)
+ img = Image.fromarray(arr)
+ buffer = io.BytesIO()
+ img.save(buffer, format="PNG")
+ img_str = base64.b64encode(buffer.getvalue()).decode()
+ return f"data:image/png;base64,{img_str}"
diff --git a/color_correction/utils/report_generator.py b/color_correction/utils/report_generator.py
new file mode 100644
index 0000000..2969965
--- /dev/null
+++ b/color_correction/utils/report_generator.py
@@ -0,0 +1,82 @@
+# report_generator.py
+from datetime import datetime
+from importlib import resources
+
+import pandas as pd
+
+from color_correction.utils.formater import format_value
+
+
+class ReportGenerator:
+ def __init__(self) -> None:
+ self.template_dir = "color_correction.templates"
+
+ def _read_template(self, filename: str) -> str:
+ """Read template file content"""
+ with resources.files(self.template_dir).joinpath(filename).open("r") as f:
+ return f.read()
+
+ def generate_report(self, body_report: str) -> str:
+ """Generate full HTML report"""
+ # Load components
+ styles = self._read_template("style-report.css")
+ scripts = self._read_template("script-report.js")
+ column_desc = self._read_template("column_description.html")
+ base_template = self._read_template("base_report.html")
+
+ current_time = datetime.now().strftime("%d %B %Y, %H:%M:%S")
+
+ # Replace placeholders
+ final_html = base_template.format(
+ styles=styles,
+ scripts=scripts,
+ current_time=current_time,
+ body_report=body_report,
+ column_descriptions=column_desc,
+ )
+ return final_html
+
+ def generate_table(self, headers: list, rows: list) -> str:
+ """Generate table HTML"""
+ headers_html = "".join([f"{h} | " for h in headers])
+ rows_html = "".join([f"{r}
" for r in rows])
+
+ return f"""
+
+
+ {headers_html}
+
+
+ {rows_html}
+
+
+ """
+
+ def generate_html_report(self, df: pd.DataFrame, path_html: str) -> str:
+ """Generate HTML report from DataFrame with images"""
+ df_html = df.copy()
+
+ # Generate rows
+ rows = []
+ for _, row in df_html.iterrows():
+ row_html = ""
+ for value in row:
+ formatted_value = format_value(value)
+ row_html += f"| {formatted_value} | "
+ row_html += "
"
+ rows.append(row_html)
+
+ table_html = self.generate_table(
+ headers=df.columns.tolist(),
+ rows=rows,
+ )
+ report_html = self.generate_report(table_html)
+
+ with open(path_html, "w") as f:
+ f.write(report_html)
+
+ return report_html
+
+ def save_dataframe(self, df: pd.DataFrame, filepath: str) -> None:
+ """Save DataFrame with pickle to preserve numpy arrays"""
+ df.to_pickle(filepath)
diff --git a/pyproject.toml b/pyproject.toml
index 487b0e7..3f40028 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "color-correction"
-version = "0.0.1b2"
+version = "0.0.1b3"
description = "help to do color correction on images based on color checker card classic 24 patch."
keywords = ["color correction", "color-correction", "color consistency", "color-consistency", "vision", "computer vision"]
readme = "README.md"
@@ -22,6 +22,7 @@ dependencies = [
"opencv-python-headless>=4.11.0.86",
"onnx>=1.17.0",
"onnxruntime>=1.20.1",
+ "pandas>=2.2.3",
]
classifiers = [
'Programming Language :: Python :: 3 :: Only',
diff --git a/tests/utils/test_correction.py b/tests/utils/test_correction.py
new file mode 100644
index 0000000..6a8fbb4
--- /dev/null
+++ b/tests/utils/test_correction.py
@@ -0,0 +1,32 @@
+import numpy as np
+import pytest
+from color_correction.utils.correction import (
+ preprocessing_compute,
+ postprocessing_compute,
+)
+
+@pytest.mark.parametrize("input_image, expected_shape", [
+ # When shape is exactly (24, 3) and should be cast without reshaping
+ (np.random.rand(24, 3) * 255, (24, 3)),
+ # When shape is different; for example a 6x6 image with 3 channels should be reshaped to (36, 3)
+ (np.random.rand(6, 6, 3) * 255, (36, 3)),
+])
+def test_preprocessing_compute(input_image, expected_shape):
+ output = preprocessing_compute(input_image)
+ assert output.dtype == np.float32
+ assert output.shape == expected_shape
+
+@pytest.mark.parametrize("original_shape, predict_image, expected_shape", [
+ # Grid image patches (original_shape length 2); no reshape needed
+ ((24, 3), np.array([[300, -10, 100]] * 24), (24, 3)),
+ # Regular colored image: reshape required from flat list to (h, w, c)
+ ((4, 4, 3), np.array([[300, -10, 100]] * 16), (4, 4, 3)),
+])
+def test_postprocessing_compute(original_shape, predict_image, expected_shape):
+ output = postprocessing_compute(original_shape, predict_image)
+ # Output must be of type uint8 after clipping
+ assert output.dtype == np.uint8
+ assert output.shape == expected_shape
+ # Validate that all values are clipped between 0 and 255
+ assert output.min() >= 0
+ assert output.max() <= 255
diff --git a/tests/utils/test_formatter.py b/tests/utils/test_formatter.py
new file mode 100644
index 0000000..4e9658c
--- /dev/null
+++ b/tests/utils/test_formatter.py
@@ -0,0 +1,23 @@
+import json
+import numpy as np
+import pytest
+from color_correction.utils.formater import format_value
+
+@pytest.mark.parametrize("value,expected_check", [
+ # For numpy array image: expecting an
tag.
+ (np.array([[1, 2, 3]], dtype=np.uint8), lambda out: out.startswith('),
+ # For dict: expecting a valid json string that decodes back to the same dict.
+ ({)