diff --git a/petdeface/petdeface.py b/petdeface/petdeface.py
index bbc3266..ab5aa07 100755
--- a/petdeface/petdeface.py
+++ b/petdeface/petdeface.py
@@ -32,11 +32,13 @@
from mideface import Mideface
from pet import WeightedAverage
from utils import run_validator
+ from qa import run_qa
except ModuleNotFoundError:
from .mideface import ApplyMideface
from .mideface import Mideface
from .pet import WeightedAverage
from .utils import run_validator
+ from .qa import run_qa
# collect version from pyproject.toml
@@ -297,7 +299,7 @@ def deface(args: Union[dict, argparse.Namespace]) -> None:
write_out_dataset_description_json(args.bids_dir)
# remove temp outputs - this is commented out to enable easier testing for now
- if str(os.getenv("DEBUG", "false")).lower() != "true":
+ if str(os.getenv("PETDEFACE_DEBUG", "false")).lower() != "true":
shutil.rmtree(os.path.join(output_dir, "petdeface_wf"))
return {"subjects": subjects}
@@ -671,21 +673,21 @@ def wrap_up_defacing(
should_exclude = False
for excluded_subject in participant_label_exclude:
# Handle both cases: excluded_subject with or without 'sub-' prefix
- if excluded_subject.startswith('sub-'):
+ if excluded_subject.startswith("sub-"):
subject_pattern = f"/{excluded_subject}/"
subject_pattern_underscore = f"/{excluded_subject}_"
else:
subject_pattern = f"/sub-{excluded_subject}/"
subject_pattern_underscore = f"/sub-{excluded_subject}_"
-
+
if subject_pattern in entry or subject_pattern_underscore in entry:
should_exclude = True
break
-
+
# Skip excluded subject files, but copy everything else (including dataset-level files)
if should_exclude:
continue
-
+
copy_path = entry.replace(str(path_to_dataset), str(final_destination))
pathlib.Path(copy_path).parent.mkdir(
parents=True, exist_ok=True, mode=0o775
@@ -730,7 +732,7 @@ def wrap_up_defacing(
desc="defaced",
return_type="file",
)
- if str(os.getenv("DEBUG", "false")).lower() != "true":
+ if str(os.getenv("PETDEFAC_DEBUG", "false")).lower() != "true":
for extraneous in derivatives:
os.remove(extraneous)
@@ -741,15 +743,16 @@ def wrap_up_defacing(
"placement must be one of ['adjacent', 'inplace', 'derivatives']"
)
- # clean up any errantly leftover files with globe in destination folder
- leftover_files = [
- pathlib.Path(defaced_nii)
- for defaced_nii in glob.glob(
- f"{final_destination}/**/*_defaced*.nii*", recursive=True
- )
- ]
- for leftover in leftover_files:
- leftover.unlink()
+    if str(os.getenv("PETDEFACE_DEBUG", "false")).lower() != "true":
+        # clean up any errant leftover files in the destination folder with glob
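+        # matches e.g. sub-01_ses-01_pet_defaced.nii.gz anywhere under final_destination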
+ leftover_files = [
+ pathlib.Path(defaced_nii)
+ for defaced_nii in glob.glob(
+ f"{final_destination}/**/*_defaced*.nii*", recursive=True
+ )
+ ]
+ for leftover in leftover_files:
+ leftover.unlink()
print(f"completed copying dataset to {final_destination}")
@@ -770,7 +773,9 @@ def move_defaced_images(
:param move_files: delete defaced images in "working" directory, e.g. move them to the destination dir instead of copying them there, defaults to False
:type move_files: bool, optional
"""
- # update paths in mapping dict
+ # Create a new mapping with destination paths
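+    # (avoids mutating mapping_dict while iterating over it below)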
+ dest_mapping = {}
+
for defaced, raw in mapping_dict.items():
# get common path and replace with final_destination to get new path
common_path = os.path.commonpath([defaced.path, raw.path])
@@ -791,15 +796,13 @@ def move_defaced_images(
]
)
)
- mapping_dict[defaced] = new_path
+ dest_mapping[defaced] = new_path
# copy defaced images to new location
- for defaced, raw in mapping_dict.items():
- if pathlib.Path(raw).exists() and pathlib.Path(defaced).exists():
- shutil.copy(defaced.path, raw)
- else:
- pathlib.Path(raw).parent.mkdir(parents=True, exist_ok=True)
- shutil.copy(defaced.path, raw)
+ for defaced, dest_path in dest_mapping.items():
+ if pathlib.Path(defaced).exists():
+ pathlib.Path(dest_path).parent.mkdir(parents=True, exist_ok=True)
+ shutil.copy(defaced.path, dest_path)
if move_files:
os.remove(defaced.path)
@@ -1056,6 +1059,12 @@ def cli():
required=False,
default=[],
)
+ parser.add_argument(
+ "--open-browser",
+ action="store_true",
+ default=False,
+ help="Open browser automatically after QA report generation",
+ )
arguments = parser.parse_args()
return arguments
@@ -1256,6 +1265,45 @@ def main(): # noqa: max-complexity: 12
)
petdeface.run()
+    # Generate QA reports comparing the original and defaced datasets
+ print("\n" + "=" * 60)
+ print("Generating QA reports...")
+ print("=" * 60)
+
+ try:
+ # Determine the defaced directory based on placement
+ if args.placement == "adjacent":
+ defaced_dir = args.bids_dir.parent / f"{args.bids_dir.name}_defaced"
+ elif args.placement == "inplace":
+ defaced_dir = args.bids_dir
+ elif args.placement == "derivatives":
+ defaced_dir = args.bids_dir / "derivatives" / "petdeface"
+ else:
+ defaced_dir = (
+ args.output_dir
+ if args.output_dir
+ else args.bids_dir / "derivatives" / "petdeface"
+ )
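+        # e.g. placement="adjacent" with bids_dir=/data/study -> /data/study_defaced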
+
+ # Run QA
+ qa_result = run_qa(
+ faced_dir=str(args.bids_dir),
+ defaced_dir=str(defaced_dir),
+ subject=(
+ " ".join(args.participant_label) if args.participant_label else None
+ ),
+ open_browser=args.open_browser,
+ )
+
+ print("\n" + "=" * 60)
+ print("QA reports generated successfully!")
+ print(f"Reports available at: {qa_result['output_dir']}")
+ print("=" * 60)
+
+ except Exception as e:
+ print(f"\nError generating QA reports: {e}")
+ print("QA report generation failed, but defacing completed successfully.")
+
if __name__ == "__main__":
main()
diff --git a/petdeface/qa.py b/petdeface/qa.py
new file mode 100644
index 0000000..4c21f5f
--- /dev/null
+++ b/petdeface/qa.py
@@ -0,0 +1,1462 @@
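+"""QA report generation for petdeface.
+
+Builds static HTML before/after comparisons between an original (faced) BIDS
+dataset and its defaced counterpart. Can also be run standalone, e.g.
+(paths here are hypothetical):
+
+    python qa.py --faced-dir /data/study --defaced-dir /data/study_defaced --open-browser
+"""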
+import argparse
+import multiprocessing as mp
+import os
+import shutil
+import sys
+import tempfile
+import webbrowser
+from datetime import datetime
+from functools import partial
+from glob import glob
+from pathlib import Path
+
+import matplotlib.pyplot as plt
+import nibabel as nib
+import numpy as np
+from matplotlib.animation import FuncAnimation
+from nilearn import image, plotting
+from nipype import Node, Workflow
+from nireports.interfaces.reporting.base import SimpleBeforeAfterRPT
+from PIL import Image
+
+
+def preprocess_single_subject(s, output_dir):
+ """Preprocess a single subject's images (for parallel processing)."""
+ temp_dir = os.path.join(output_dir, "temp_3d_images")
+
+ # Extract BIDS suffix from original path to preserve meaningful naming
+ orig_basename = os.path.basename(s["orig_path"])
+ defaced_basename = os.path.basename(s["defaced_path"])
+
+ # Extract the meaningful part (e.g., "sub-01_ses-baseline_T1w" or "sub-01_ses-baseline_pet")
+ def extract_bids_name(basename):
+ # Remove .nii.gz extension
+ name = basename.replace(".nii.gz", "").replace(".nii", "")
+ return name
+
+ orig_bids_name = extract_bids_name(orig_basename)
+ defaced_bids_name = extract_bids_name(defaced_basename)
+
+ # Preprocess original image
+ orig_result = load_and_preprocess_image(s["orig_path"])
+ if isinstance(orig_result, nib.Nifti1Image):
+ # Need to save the averaged image with meaningful name
+ orig_3d_path = os.path.join(temp_dir, f"orig_{orig_bids_name}.nii.gz")
+ nib.save(orig_result, orig_3d_path)
+ orig_img = orig_result
+ else:
+ # Already 3D, use original path and load image
+ orig_3d_path = orig_result
+ orig_img = nib.load(orig_result)
+
+ # Preprocess defaced image
+ defaced_result = load_and_preprocess_image(s["defaced_path"])
+ if isinstance(defaced_result, nib.Nifti1Image):
+ # Need to save the averaged image with meaningful name
+ defaced_3d_path = os.path.join(temp_dir, f"defaced_{defaced_bids_name}.nii.gz")
+ nib.save(defaced_result, defaced_3d_path)
+ defaced_img = defaced_result
+ else:
+ # Already 3D, use original path and load image
+ defaced_3d_path = defaced_result
+ defaced_img = nib.load(defaced_result)
+
+ # Create new subject dict with preprocessed paths (update paths to 3D versions)
+ preprocessed_subject = {
+ "id": s["id"],
+ "orig_path": orig_3d_path, # Update to 3D path
+ "defaced_path": defaced_3d_path, # Update to 3D path
+ "orig_img": orig_img, # Keep loaded image for direct use
+ "defaced_img": defaced_img, # Keep loaded image for direct use
+ }
+
+ print(f" Preprocessed {s['id']}")
+ return preprocessed_subject
+
+
+def preprocess_images(subjects: list, output_dir, n_jobs=None):
+ """Preprocess all images once: load and convert 4D to 3D if needed."""
+ print("Preprocessing images (4D→3D conversion)...")
+
+ # Create temp directory
+ temp_dir = os.path.join(output_dir, "temp_3d_images")
+ os.makedirs(temp_dir, exist_ok=True)
+
+ # Set number of jobs for parallel processing
+ if n_jobs is None:
+ n_jobs = mp.cpu_count()
+ print(f"Using {n_jobs} parallel processes for preprocessing")
+
+ # Process subjects in parallel
+ with mp.Pool(processes=n_jobs) as pool:
+ # Create a partial function with the output_dir fixed
+ preprocess_func = partial(preprocess_single_subject, output_dir=output_dir)
+
+ # Process all subjects in parallel
+ preprocessed_subjects = pool.map(preprocess_func, subjects)
+
+ print(f"Preprocessed {len(preprocessed_subjects)} subjects")
+ return preprocessed_subjects
+
+
+def generate_simple_before_and_after(preprocessed_subjects: list, output_dir):
+    """Generate nireports before/after SVG comparisons for T1w images."""
+    if not output_dir:
+        # Fall back to a scratch directory when no output location is given
+        output_dir = tempfile.mkdtemp()
+    wf = Workflow(
+        name="simple_before_after_report", base_dir=Path(output_dir) / "images"
+    )
+
+ # Create a list to store all nodes
+ nodes = []
+
+ for s in preprocessed_subjects:
+ # only run this on the T1w images for now
+ if "T1w" in s["orig_path"]:
+ o_path = Path(s["orig_path"])
+ # Create a valid node name by replacing invalid characters but preserving session info
+ # Use the full path to ensure uniqueness
+ path_parts = s["orig_path"].split(os.sep)
+ subject_part = next(
+ (p for p in path_parts if p.startswith("sub-")), s["id"]
+ )
+ session_part = next((p for p in path_parts if p.startswith("ses-")), "")
+
+ if session_part:
+ valid_name = f"before_after_{subject_part}_{session_part}".replace(
+ "-", "_"
+ )
+ else:
+ valid_name = f"before_after_{subject_part}".replace("-", "_")
+ node = Node(
+ SimpleBeforeAfterRPT(
+ before=s["orig_path"],
+ after=s["defaced_path"],
+ before_label="Original",
+ after_label="Defaced",
+ out_report=f"{s['id']}_simple_before_after.svg",
+ ),
+ name=valid_name,
+ )
+ nodes.append(node)
+
+ # Add all nodes to the workflow
+ wf.add_nodes(nodes)
+
+ # Only run if we have nodes to process
+ if nodes:
+ wf.run(plugin="MultiProc", plugin_args={"n_procs": mp.cpu_count()})
+ # Collect SVG files and move them to images folder
+ collect_svg_reports(wf, output_dir)
+ else:
+ print("No T1w images found for SVG report generation")
+
+
+def collect_svg_reports(wf, output_dir):
+ """Collect SVG reports from workflow and move them to images folder."""
+    # Find all SVG files in the workflow directory (uses the module-level glob import)
+    workflow_dir = wf.base_dir
+    svg_files = glob(os.path.join(workflow_dir, "**", "*.svg"), recursive=True)
+
+ print(f"Found {len(svg_files)} SVG reports")
+
+ # Move each SVG to the images folder
+ for svg_file in svg_files:
+ filename = os.path.basename(svg_file)
+ dest_path = os.path.join(output_dir, "images", filename)
+ shutil.move(svg_file, dest_path)
+ print(f" Moved: {filename}")
+
+ # Create HTML page for SVG reports
+ create_svg_index_html(svg_files, output_dir)
+
+
+def create_svg_index_html(svg_files, output_dir):
+ """Create HTML index page for SVG reports."""
+
+ svg_entries = ""
+ for svg_file in svg_files:
+ filename = os.path.basename(svg_file)
+ subject_id = filename.replace("_simple_before_after.svg", "")
+
+ svg_entries += f"""
+
+
{subject_id}
+
+
+ """
+
+ html_content = f"""
+
+
+
+
+ PET Deface SVG Reports
+
+
+
+
+
+
+
+
+ Use arrow keys or spacebar
+
+
+
+
+
+
+ {svg_entries}
+
+
+
+
+
+ """
+
+ svg_index_file = os.path.join(output_dir, "SimpleBeforeAfterRPT.html")
+ with open(svg_index_file, "w") as f:
+ f.write(html_content)
+
+ print(f"Created SVG reports index: {svg_index_file}")
+
+
+def create_overlay_comparison(orig_path, defaced_path, subject_id, output_dir):
+ """Create overlay comparison with original as background and defaced as overlay."""
+
+ # Load images
+ orig_img = image.load_img(orig_path)
+ defaced_img = image.load_img(defaced_path)
+
+    # Plot the original anatomy as the background
+    display = plotting.plot_anat(
+        orig_img,
+        title=f"Overlay: Original (background) + Defaced (overlay) - {subject_id}",
+        display_mode="ortho",
+        cut_coords=(0, 0, 0),
+        colorbar=True,
+        annotate=True,
+    )
+
+    # Add the defaced image as a semi-transparent overlay
+    display.add_overlay(defaced_img, cmap="autumn", alpha=0.7)
+
+    # Save overlay
+    overlay_file = os.path.join(output_dir, f"overlay_{subject_id}.png")
+    display.savefig(overlay_file, dpi=150)
+    display.close()
+
+ return overlay_file
+
+
+def create_animated_gif(orig_path, defaced_path, subject_id, output_dir, n_slices=20):
+ """Create animated GIF showing different slices through the volume."""
+
+ # Load images
+ orig_img = image.load_img(orig_path)
+ defaced_img = image.load_img(defaced_path)
+
+ # Get data
+ orig_data = orig_img.get_fdata()
+ defaced_data = defaced_img.get_fdata()
+
+ # Create figure for animation
+ fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
+ fig.suptitle(f"Animated Comparison - {subject_id}", fontsize=16)
+
+ # Initialize plots
+ slice_idx = orig_data.shape[2] // 2
+ im1 = ax1.imshow(orig_data[:, :, slice_idx], cmap="gray")
+ im2 = ax2.imshow(defaced_data[:, :, slice_idx], cmap="hot")
+
+ ax1.set_title("Original")
+ ax2.set_title("Defaced")
+ ax1.axis("off")
+ ax2.axis("off")
+
+ def animate(frame):
+ slice_idx = int(frame * orig_data.shape[2] / n_slices)
+ im1.set_array(orig_data[:, :, slice_idx])
+ im2.set_array(defaced_data[:, :, slice_idx])
+ return [im1, im2]
+
+ # Create animation
+ anim = FuncAnimation(fig, animate, frames=n_slices, interval=200, blit=True)
+
+ # Save as GIF
+ gif_file = os.path.join(output_dir, f"animation_{subject_id}.gif")
+ anim.save(gif_file, writer="pillow", fps=5)
+    plt.close(fig)
+
+ return gif_file
+
+
+def create_overlay_gif(image_files, subject_id, output_dir):
+ """Create an animated GIF switching between original and defaced."""
+
+ # Load the PNG images
+ orig_png_path = os.path.join(
+ output_dir, "images", image_files[0][2]
+ ) # original image
+ defaced_png_path = os.path.join(
+ output_dir, "images", image_files[1][2]
+ ) # defaced image
+
+ # Open images
+ orig_img = Image.open(orig_png_path)
+ defaced_img = Image.open(defaced_png_path)
+
+ # Ensure same size
+ if orig_img.size != defaced_img.size:
+ defaced_img = defaced_img.resize(orig_img.size, Image.Resampling.LANCZOS)
+
+ # Create frames for the GIF
+ frames = []
+
+ # Frame 1: Original
+ frames.append(orig_img.copy())
+
+ # Frame 2: Defaced
+ frames.append(defaced_img.copy())
+
+ # Save as GIF
+ gif_filename = f"overlay_{subject_id}.gif"
+ gif_path = os.path.join(output_dir, "images", gif_filename)
+ frames[0].save(
+ gif_path,
+ save_all=True,
+ append_images=frames[1:],
+ duration=1500, # 1.5 seconds per frame
+ loop=0,
+ )
+
+ return gif_filename
+
+
+def load_and_preprocess_image(img_path):
+ """Load image and take mean if it has more than 3 dimensions.
+ Returns nibabel image if averaging was needed, otherwise returns original path."""
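+    # Callers (e.g. preprocess_single_subject) must branch on the return type:
+    # a Nifti1Image is a freshly averaged volume that still needs saving, while
+    # a str path means the on-disk file was already 3D.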
+ img = nib.load(img_path)
+
+ # Check if image has more than 3 dimensions
+ if len(img.shape) > 3:
+ print(
+ f" Converting 4D image to 3D by taking mean: {os.path.basename(img_path)}"
+ )
+ # Take mean across the 4th dimension
+ data = img.get_fdata()
+ mean_data = np.mean(data, axis=3)
+ # Create new 3D image
+ img = nib.Nifti1Image(mean_data, img.affine, img.header)
+ return img # Return nibabel image object
+ else:
+ return img_path # Return original path if already 3D
+
+
+def create_comparison_html(
+ orig_img,
+ defaced_img,
+ subject_id,
+ output_dir,
+ display_mode="side-by-side",
+ size="compact",
+ orig_path=None,
+ defaced_path=None,
+):
+ """Create HTML comparison page for a subject using nilearn ortho views."""
+
+ # Get basenames for display - use actual filenames with BIDS suffixes if available
+ if orig_path and defaced_path:
+ orig_basename = os.path.basename(orig_path)
+ defaced_basename = os.path.basename(defaced_path)
+ else:
+ # Fallback to generic names if paths not provided
+ orig_basename = f"orig_{subject_id}.nii.gz"
+ defaced_basename = f"defaced_{subject_id}.nii.gz"
+
+ # Generate images and get their filenames
+ image_files = []
+ for label, img, basename, cmap in [
+ ("original", orig_img, orig_basename, "hot"), # Colored for original
+ ("defaced", defaced_img, defaced_basename, "gray"), # Grey for defaced
+ ]:
+        # Extract a single mid-sagittal slice to render with matplotlib
+        img_data = img.get_fdata()
+        x_midpoint = img_data.shape[0] // 2  # middle slice index
+
+        # Rotate the slice 270 degrees so the face points left and the head
+        # is right-side up
+        sagittal_slice = img_data[x_midpoint, :, :]
+        rotated_slice = np.rot90(sagittal_slice, k=3)
+
+ # Crop the image to remove empty black space
+ # Find non-zero regions
+ non_zero_mask = rotated_slice > 0
+ if np.any(non_zero_mask):
+ # Get bounding box of non-zero pixels
+ rows = np.any(non_zero_mask, axis=1)
+ cols = np.any(non_zero_mask, axis=0)
+ rmin, rmax = np.where(rows)[0][[0, -1]]
+ cmin, cmax = np.where(cols)[0][[0, -1]]
+
+ # Add some padding
+ padding = 10
+ rmin = max(0, rmin - padding)
+ rmax = min(rotated_slice.shape[0], rmax + padding)
+ cmin = max(0, cmin - padding)
+ cmax = min(rotated_slice.shape[1], cmax + padding)
+
+ # Crop the image
+ cropped_slice = rotated_slice[rmin:rmax, cmin:cmax]
+ else:
+ cropped_slice = rotated_slice
+
+ # Create figure with matplotlib
+ fig, ax = plt.subplots(figsize=(8, 8))
+ im = ax.imshow(cropped_slice, cmap=cmap, aspect="equal")
+ ax.set_title(f"{label.title()}: {basename} ({cmap} colormap)")
+ ax.axis("off")
+
+ # Save as PNG
+ png_filename = f"{label}_{subject_id}.png"
+ png_path = os.path.join(output_dir, "images", png_filename)
+ fig.savefig(png_path, dpi=150)
+ plt.close(fig)
+
+ image_files.append((label, basename, png_filename))
+
+ # Create overlay GIF if we have both images
+ if len(image_files) == 2:
+ overlay_gif = create_overlay_gif(image_files, subject_id, output_dir)
+ image_files.append(("overlay", "comparison", overlay_gif))
+
+ # Create the comparison HTML with embedded images
+ html_content = f"""
+
+
+
+
+ PET Deface Comparison - {subject_id}
+
+
+
+
+
+
+
+
+ Use arrow keys or spacebar
+
+
+
+
+
+
+ """
+
+ # Add content based on display mode
+ if display_mode == "side-by-side":
+ # Add images side by side only
+ html_content += f"""
+
+
{image_files[0][0].title()}: {image_files[0][1]}
+
![{image_files[0][0].title()}: {image_files[0][1]}]({image_files[0][2]})
+
+
+
{image_files[1][0].title()}: {image_files[1][1]}
+
![{image_files[1][0].title()}: {image_files[1][1]}]({image_files[1][2]})
+
+
+ """
+ elif display_mode == "gif":
+ # Show only the GIF
+ if len(image_files) > 2:
+ html_content += f"""
+
+
Animated Comparison
+
Switching between Original and Defaced images
+

+
+
+ """
+ else:
+ html_content += """
+
+ """
+
+ html_content += """
+
+
+
+ """
+
+ # Save the comparison HTML
+ comparison_file = os.path.join(output_dir, f"comparison_{subject_id}.html")
+ with open(comparison_file, "w") as f:
+ f.write(html_content)
+
+ return comparison_file
+
+
+def process_subject(subject, output_dir, size="compact"):
+ """Process a single subject (for parallel processing)."""
+ print(f"Processing {subject['id']}...")
+ try:
+ comparison_file = create_comparison_html(
+ subject["orig_img"],
+ subject["defaced_img"],
+ subject["id"],
+ output_dir,
+ "side-by-side", # Always generate side-by-side for individual pages
+ size,
+ subject["orig_path"],
+ subject["defaced_path"],
+ )
+ print(f" Completed: {subject['id']}")
+ return comparison_file
+ except Exception as e:
+ print(f" Error processing {subject['id']}: {e}")
+ return None
+
+
+def build_subjects_from_datasets(orig_dir, defaced_dir):
+ """Build subject list with file paths."""
+
+ # Get all NIfTI files but exclude derivatives and workflow folders
+ def get_nifti_files(directory):
+ all_files = glob(os.path.join(directory, "**", "*.nii*"), recursive=True)
+ # Filter out files in derivatives, workflow, or other processing folders
+ filtered_files = []
+ for file_path in all_files:
+ # Skip files in derivatives, workflow, or processing-related directories
+ path_parts = file_path.split(os.sep)
+ skip_dirs = ["derivatives", "work", "wf", "tmp", "temp", "scratch", "cache"]
+ if not any(skip_dir in path_parts for skip_dir in skip_dirs):
+ filtered_files.append(file_path)
+ return filtered_files
+
+ orig_files = get_nifti_files(orig_dir)
+ defaced_files = get_nifti_files(defaced_dir)
+
+ def strip_ext(path):
+ base = os.path.basename(path)
+ if base.endswith(".gz"):
+ base = os.path.splitext(os.path.splitext(base)[0])[0]
+ else:
+ base = os.path.splitext(base)[0]
+ return base
+
+ def get_unique_key(file_path):
+ """Create a unique key that includes session information."""
+ parts = file_path.split(os.sep)
+ sub_id = next((p for p in parts if p.startswith("sub-")), "")
+ session = next((p for p in parts if p.startswith("ses-")), "")
+ basename = strip_ext(file_path)
+
+ # Create unique key that includes session if present
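+        # e.g. a (hypothetical) path sub-01/ses-baseline/anat/sub-01_ses-baseline_T1w.nii.gz
+        # yields the key "sub-01_ses-baseline_sub-01_ses-baseline_T1w"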
+ if session:
+ return f"{sub_id}_{session}_{basename}"
+ else:
+ return f"{sub_id}_{basename}"
+
+ # Create maps with unique keys
+ orig_map = {get_unique_key(f): f for f in orig_files}
+ defaced_map = {get_unique_key(f): f for f in defaced_files}
+ common_keys = sorted(set(orig_map.keys()) & set(defaced_map.keys()))
+
+ subjects = []
+ for key in common_keys:
+ orig_path = orig_map[key]
+ defaced_path = defaced_map[key]
+ parts = orig_path.split(os.sep)
+ sub_id = next((p for p in parts if p.startswith("sub-")), key)
+ session = next((p for p in parts if p.startswith("ses-")), "")
+
+ # Create a unique subject ID that includes session if present
+ if session:
+ subject_id = f"{sub_id}_{session}"
+ else:
+ subject_id = sub_id
+
+ subjects.append(
+ {
+ "id": subject_id,
+ "orig_path": orig_path,
+ "defaced_path": defaced_path,
+ }
+ )
+
+    if not subjects:
+        # Raise instead of calling exit() so programmatic callers can handle it
+        raise FileNotFoundError("No matching NIfTI files found in both datasets.")
+
+ return subjects
+
+
+def create_side_by_side_index_html(subjects, output_dir, size="compact"):
+ """Create index page for side-by-side comparisons."""
+
+ comparisons_html = ""
+ for subject in subjects:
+ subject_id = subject["id"]
+
+ # Use actual filenames with BIDS suffixes instead of generic names
+ orig_basename = os.path.basename(subject["orig_path"])
+ defaced_basename = os.path.basename(subject["defaced_path"])
+
+ # Check if the PNG files exist
+ orig_png = f"images/original_{subject_id}.png"
+ defaced_png = f"images/defaced_{subject_id}.png"
+
+ comparisons_html += f"""
+
+
{subject_id}
+
+
+
Original: {orig_basename}
+

+
+
+
Defaced: {defaced_basename}
+

+
+
+
+ """
+
+ html_content = f"""
+
+
+
+
+ PET Deface Comparisons - Side by Side
+
+
+
+
+
+
+
+
+ Use arrow keys or spacebar
+
+
+
+
+
+
+ {comparisons_html}
+
+
+
+ """
+
+ index_file = os.path.join(output_dir, "side_by_side.html")
+ with open(index_file, "w") as f:
+ f.write(html_content)
+
+ return index_file
+
+
+def create_gif_index_html(subjects, output_dir, size="compact"):
+ """Create index page for GIF comparisons."""
+
+ comparisons_html = ""
+ for subject in subjects:
+ subject_id = subject["id"]
+ overlay_gif = f"images/overlay_{subject_id}.gif"
+
+ comparisons_html += f"""
+
+
{subject_id}
+
+
Animated Comparison
+
Switching between Original and Defaced images
+

+
+
+ """
+
+ html_content = f"""
+
+
+
+
+ PET Deface Comparisons - Animated
+
+
+
+
+
+
+
+
+ Use arrow keys or spacebar
+
+
+
+
+
+
+ {comparisons_html}
+
+
+
+ """
+
+ index_file = os.path.join(output_dir, "animated.html")
+ with open(index_file, "w") as f:
+ f.write(html_content)
+
+ return index_file
+
+
+def run_qa(
+ faced_dir,
+ defaced_dir,
+ output_dir=None,
+ subject=None,
+ n_jobs=None,
+ size="compact",
+ open_browser=False,
+):
+ """
+ Run QA report generation programmatically.
+
+ Args:
+ faced_dir (str): Path to original (faced) dataset directory
+ defaced_dir (str): Path to defaced dataset directory
+ output_dir (str, optional): Output directory for HTML files
+ subject (str, optional): Filter to specific subject
+ n_jobs (int, optional): Number of parallel jobs
+ size (str): Image size - 'compact' or 'full'
+ open_browser (bool): Whether to open browser automatically
+
+ Returns:
+ dict: Information about generated files
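+
+    Example (a sketch; paths are hypothetical):
+        >>> result = run_qa("/data/study", "/data/study_defaced", subject="sub-01")
+        >>> print(result["index_file"])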
+ """
+ faced_dir = os.path.abspath(faced_dir)
+ defaced_dir = os.path.abspath(defaced_dir)
+
+ # Create output directory name based on input directories
+ if output_dir:
+ output_dir = os.path.abspath(output_dir)
+ else:
+ orig_folder = os.path.basename(faced_dir)
+ defaced_folder = os.path.basename(defaced_dir)
+ output_dir = os.path.abspath(f"{orig_folder}_{defaced_folder}_qa")
+
+ # Create output directory and images subdirectory
+ os.makedirs(output_dir, exist_ok=True)
+ os.makedirs(os.path.join(output_dir, "images"), exist_ok=True)
+ print(f"Output directory: {output_dir}")
+
+ # Build subjects list
+ subjects = build_subjects_from_datasets(faced_dir, defaced_dir)
+ print(f"Found {len(subjects)} subjects with matching files")
+
+ # Filter to specific subject if requested
+ if subject:
+ original_count = len(subjects)
+ subjects = [s for s in subjects if subject in s["id"]]
+ print(
+ f"Filtered to {len(subjects)} subjects matching '{subject}' (from {original_count} total)"
+ )
+
+ if not subjects:
+ print(f"No subjects found matching '{subject}'")
+ print("Available subjects:")
+ all_subjects = build_subjects_from_datasets(faced_dir, defaced_dir)
+ for s in all_subjects:
+ print(f" - {s['id']}")
+ raise ValueError(f"No subjects found matching '{subject}'")
+
+ # Set number of jobs for parallel processing
+ if n_jobs is None:
+ n_jobs = mp.cpu_count()
+ print(f"Using {n_jobs} parallel processes")
+
+ # Preprocess all images once (4D→3D conversion)
+ preprocessed_subjects = preprocess_images(subjects, output_dir, n_jobs)
+
+ # create nireports svg's for comparison
+ generate_simple_before_and_after(
+ preprocessed_subjects=preprocessed_subjects, output_dir=output_dir
+ )
+
+ # Process subjects in parallel
+ print("Generating comparisons...")
+ with mp.Pool(processes=n_jobs) as pool:
+ # Create a partial function with the output_dir and size fixed
+ process_func = partial(
+ process_subject,
+ output_dir=output_dir,
+ size=size,
+ )
+
+ # Process all subjects in parallel
+ results = pool.map(process_func, preprocessed_subjects)
+
+ # Count successful results
+ successful = [r for r in results if r is not None]
+ print(
+ f"Successfully processed {len(successful)} out of {len(preprocessed_subjects)} subjects"
+ )
+
+ # Create both HTML files
+ side_by_side_file = create_side_by_side_index_html(
+ preprocessed_subjects, output_dir, size
+ )
+ animated_file = create_gif_index_html(preprocessed_subjects, output_dir, size)
+
+ # Create a simple index that links to both
+ index_html = f"""
+
+
+
+
+ PET Deface Comparisons
+
+
+
+
+
+
+ """
+
+ index_file = os.path.join(output_dir, "index.html")
+ with open(index_file, "w") as f:
+ f.write(index_html)
+
+ # Save the command with full expanded paths
+ command_parts = [
+ sys.executable,
+ os.path.abspath(__file__),
+ "--faced-dir",
+ faced_dir,
+ "--defaced-dir",
+ defaced_dir,
+ "--output-dir",
+ output_dir,
+ "--size",
+ size,
+ ]
+ if n_jobs:
+ command_parts.extend(["--n-jobs", str(n_jobs)])
+ if subject:
+ command_parts.extend(["--subject", subject])
+ if open_browser:
+ command_parts.append("--open-browser")
+
+ command_str = " ".join(command_parts)
+
+ command_file = os.path.join(output_dir, "command.txt")
+ with open(command_file, "w") as f:
+ f.write(f"# Command used to generate this comparison\n")
+ f.write(
+ f"# Generated on: {__import__('datetime').datetime.now().isoformat()}\n\n"
+ )
+ f.write(command_str + "\n")
+
+ print(f"Created side-by-side view: {side_by_side_file}")
+ print(f"Created animated view: {animated_file}")
+ print(f"Created main index: {index_file}")
+ print(f"Saved command to: {command_file}")
+
+ # Open browser if requested
+ if open_browser:
+ webbrowser.open(f"file://{index_file}")
+ print(f"Opened browser to: {index_file}")
+
+ print(f"\nAll files generated in: {output_dir}")
+ print(f"Open index.html in your browser to view comparisons")
+
+ return {
+ "output_dir": output_dir,
+ "side_by_side_file": side_by_side_file,
+ "animated_file": animated_file,
+ "index_file": index_file,
+ "command_file": command_file,
+ "subjects_processed": len(successful),
+ "total_subjects": len(preprocessed_subjects),
+ }
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Generate static HTML comparisons of PET deface results using nilearn."
+ )
+ parser.add_argument(
+ "--faced-dir", required=True, help="Directory for original (faced) dataset"
+ )
+ parser.add_argument(
+ "--defaced-dir", required=True, help="Directory for defaced dataset"
+ )
+ parser.add_argument(
+ "--output-dir",
+ help="Output directory for HTML files (default: {orig_folder}_{defaced_folder}_qa)",
+ )
+ parser.add_argument(
+ "--open-browser", action="store_true", help="Open browser automatically"
+ )
+ parser.add_argument(
+ "--n-jobs",
+ type=int,
+ default=None,
+ help="Number of parallel jobs (default: all cores)",
+ )
+ parser.add_argument(
+ "--subject",
+ type=str,
+ help="Filter to specific subject (e.g., 'sub-01' or 'sub-01_ses-baseline')",
+ )
+
+ parser.add_argument(
+ "--size",
+ type=str,
+ choices=["compact", "full"],
+ default="compact",
+ help="Image size: 'compact' for closer together or 'full' for entire page width",
+ )
+ args = parser.parse_args()
+
+ return run_qa(
+ faced_dir=args.faced_dir,
+ defaced_dir=args.defaced_dir,
+ output_dir=args.output_dir,
+ subject=args.subject,
+ n_jobs=args.n_jobs,
+ size=args.size,
+ open_browser=args.open_browser,
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/poetry.lock b/poetry.lock
index d5251b5..b397c86 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,19 +1,16 @@
-# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.
[[package]]
name = "acres"
-version = "0.1.0"
+version = "0.5.0"
description = "Access resources on your terms"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
files = [
- {file = "acres-0.1.0-py3-none-any.whl", hash = "sha256:7bbb3744de84d1499e5cc00f02d10f7e85c880e343722871816ca41502d4d103"},
- {file = "acres-0.1.0.tar.gz", hash = "sha256:4765479683389849368947da9e5319e677e7323ed858d642f9736ad1c070f45b"},
+ {file = "acres-0.5.0-py3-none-any.whl", hash = "sha256:fcc32b974b510897de0f041609b4234f9ff03e2e960aea088f63973fb106c772"},
+ {file = "acres-0.5.0.tar.gz", hash = "sha256:128b6447bf5df3b6210264feccbfa018b4ac5bd337358319aec6563f99db8f3a"},
]
-[package.dependencies]
-importlib_resources = {version = "*", markers = "python_version < \"3.11\""}
-
[[package]]
name = "alabaster"
version = "0.7.16"
@@ -1512,6 +1509,17 @@ plotting = ["kaleido", "kaleido (==0.1.0.post1)", "matplotlib (>=3.3.0)", "plotl
style = ["black", "blacken-docs", "codespell", "flake8", "flake8-docstrings", "flake8-functions", "flake8-use-fstring", "flynt", "isort", "tomli"]
test = ["coverage", "pytest (>=6.0.0)", "pytest-cov"]
+[[package]]
+name = "nipreps"
+version = "1.0"
+description = "Namespace package for nipreps utilities"
+optional = false
+python-versions = "*"
+files = [
+ {file = "nipreps-1.0-py2.py3-none-any.whl", hash = "sha256:9ff935d98301cc36633487674cc66128c52da2ce8e92a5871cc462704bb0e3d8"},
+ {file = "nipreps-1.0.tar.gz", hash = "sha256:944e7e55238e1db838c9647eecfff012cae982fc023d7b8042279c044209b7c2"},
+]
+
[[package]]
name = "nipype"
version = "1.9.1"
@@ -1557,6 +1565,40 @@ ssh = ["paramiko"]
tests = ["coverage (>=5.2.1)", "pandas (>=1.5.0)", "pytest (>=6)", "pytest-cov (>=2.11)", "pytest-doctestplus", "pytest-env", "pytest-timeout (>=1.4)", "pytest-xdist (>=2.5)", "sphinx (>=7)"]
xvfbwrapper = ["xvfbwrapper"]
+[[package]]
+name = "nireports"
+version = "25.2.0"
+description = "NiReports - The Visual Report System (VRS) of NiPreps"
+optional = false
+python-versions = ">=3.10"
+files = [
+ {file = "nireports-25.2.0-py3-none-any.whl", hash = "sha256:9d29c67c88782ae5cfe29442c1e86123c00e9c84f7bc348537c7e8a83976fa7e"},
+ {file = "nireports-25.2.0.tar.gz", hash = "sha256:e82cd93ed845180dc7a4e4b07bbeede83c46c2b456210c6741e178c286e6c26f"},
+]
+
+[package.dependencies]
+acres = ">=0.2"
+lxml = ">=4.6"
+matplotlib = ">=3.5"
+nibabel = ">=3.0.1"
+nilearn = ">=0.8"
+nipype = ">=1.8.5"
+numpy = ">=1.20"
+pandas = ">=1.2"
+pybids = ">=0.15.1"
+pyyaml = ">=5.4"
+seaborn = ">=0.13"
+templateflow = ">=23.1"
+
+[package.extras]
+all = ["coverage[toml] (>=5.2.1)", "furo", "packaging", "pre-commit", "pydot (>=1.2.3)", "pydotplus", "pytest (>=6)", "pytest-cov (>=2.11)", "pytest-env", "pytest-xdist (>=2.5)", "ruff", "sphinx", "sphinx (>=6)", "sphinxcontrib-apidoc", "sphinxcontrib-napoleon"]
+dev = ["pre-commit", "ruff"]
+doc = ["furo", "pydot (>=1.2.3)", "pydotplus", "sphinx", "sphinxcontrib-apidoc", "sphinxcontrib-napoleon"]
+docs = ["furo", "pydot (>=1.2.3)", "pydotplus", "sphinx", "sphinxcontrib-apidoc", "sphinxcontrib-napoleon"]
+test = ["coverage[toml] (>=5.2.1)", "packaging", "pytest (>=6)", "pytest-cov (>=2.11)", "pytest-env", "pytest-xdist (>=2.5)", "sphinx (>=6)"]
+tests = ["coverage[toml] (>=5.2.1)", "packaging", "pytest (>=6)", "pytest-cov (>=2.11)", "pytest-env", "pytest-xdist (>=2.5)", "sphinx (>=6)"]
+types = ["lxml-stubs", "pandas-stubs", "pytest", "scipy-stubs", "types-jinja2", "types-pyyaml"]
+
[[package]]
name = "nitransforms"
version = "24.1.0"
@@ -3261,4 +3303,4 @@ files = [
[metadata]
lock-version = "2.0"
python-versions = ">=3.10, <4.0"
-content-hash = "6de8a7cc6be569cb4483d6784cb54ac7a6ca8cc97ccbb1dee8ee18f41895bd43"
+content-hash = "980a7566badf67e01072461de1f2e76d23db2238b660a4aa4d44181073092a8e"
diff --git a/pyproject.toml b/pyproject.toml
index 3dfdec5..c37aa7a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -21,6 +21,16 @@ petutils = "^0.0.1"
niworkflows = "^1.11.0"
niftifixer = {git = "https://github.com/openneuropet/nifti_fixer.git"}
bids-validator-deno = "^2.0.5"
+nipreps = "^1.0"
+nireports = "^25.2.0"
+nibabel = "^5.3.2"
+nilearn = "^0.10.4"
+matplotlib = "^3.9.2"
+numpy = "^2.1.3"
+scipy = "^1.14.1"
+seaborn = "^0.13.2"
+pillow = "^11.0.0"
+imageio = "^2.36.0"
[tool.poetry.group.dev.dependencies]
diff --git a/tests/test_dir_layouts.py b/tests/test_dir_layouts.py
index 08e7c53..5b0609c 100644
--- a/tests/test_dir_layouts.py
+++ b/tests/test_dir_layouts.py
@@ -129,7 +129,7 @@ def test_participant_exclusion():
"""Test that participant exclusion works correctly by excluding sub-02"""
with tempfile.TemporaryDirectory() as temp_dir:
test_dir = Path(temp_dir)
-
+
# Create the test directory and copy our data
shutil.copytree(data_dir, test_dir / "participant_exclusion")
@@ -145,37 +145,45 @@ def test_participant_exclusion():
# Check the final defaced dataset directory
final_defaced_dir = test_dir / "participant_exclusion_defaced"
-
+
# Count files in the final defaced dataset
all_files = list(final_defaced_dir.rglob("*"))
all_files = [f for f in all_files if f.is_file()] # Only files, not directories
-
+
# Count files by subject
sub01_files = [f for f in all_files if "sub-01" in str(f)]
sub02_files = [f for f in all_files if "sub-02" in str(f)]
-
+
print(f"Total files in defaced dataset: {len(all_files)}")
print(f"sub-01 files: {len(sub01_files)}")
print(f"sub-02 files: {len(sub02_files)}")
-
+
# Verify that sub-02 does NOT appear anywhere in the final defaced dataset
- assert len(sub02_files) == 0, f"sub-02 should be completely excluded from final defaced dataset, but found {len(sub02_files)} files: {[str(f) for f in sub02_files]}"
-
+ assert (
+ len(sub02_files) == 0
+ ), f"sub-02 should be completely excluded from final defaced dataset, but found {len(sub02_files)} files: {[str(f) for f in sub02_files]}"
+
# Verify that sub-01 exists and was processed
assert len(sub01_files) > 0, "sub-01 should exist in final defaced dataset"
- assert (final_defaced_dir / "sub-01").exists(), "sub-01 directory should exist in final defaced dataset"
-
+ assert (
+ final_defaced_dir / "sub-01"
+ ).exists(), "sub-01 directory should exist in final defaced dataset"
+
# Verify processing artifacts exist for sub-01 in derivatives
derivatives_dir = final_defaced_dir / "derivatives" / "petdeface"
if derivatives_dir.exists():
sub01_defacemasks = list(derivatives_dir.glob("**/sub-01*defacemask*"))
sub01_lta_files = list(derivatives_dir.glob("**/sub-01*.lta"))
-
+
print(f"sub-01 defacemasks found: {len(sub01_defacemasks)}")
print(f"sub-01 lta files found: {len(sub01_lta_files)}")
-
- assert len(sub01_defacemasks) > 0, "sub-01 should have been processed and have defacemasks"
- assert len(sub01_lta_files) > 0, "sub-01 should have been processed and have lta registration files"
+
+ assert (
+ len(sub01_defacemasks) > 0
+ ), "sub-01 should have been processed and have defacemasks"
+ assert (
+ len(sub01_lta_files) > 0
+ ), "sub-01 should have been processed and have lta registration files"
def test_no_anat():