diff --git a/.plan/todo.md b/.plan/todo.md index ce1c0267..06fcaf08 100644 --- a/.plan/todo.md +++ b/.plan/todo.md @@ -73,6 +73,8 @@ - Manual MMAL: https://gist.github.com/rwb27/a23808e9f4008b48de95692a38ddaa08 - Stop motion +https://www.raspberrypi.com/documentation/accessories/camera.html + # MMAL Adjustments https://github.com/labthings/picamerax/blob/master/picamerax/mmal.py - MMAL_PARAMETER_HIGH_DYNAMIC_RANGE, diff --git a/case/base_plate/JankyCam Case Base v0.10.stl b/case/base_plate/JankyCam Case Base v0.10.stl new file mode 100644 index 00000000..97640639 Binary files /dev/null and b/case/base_plate/JankyCam Case Base v0.10.stl differ diff --git a/case/base_plate/JankyCam Case Base v0.11.stl b/case/base_plate/JankyCam Case Base v0.11.stl new file mode 100644 index 00000000..40042ee9 Binary files /dev/null and b/case/base_plate/JankyCam Case Base v0.11.stl differ diff --git a/case/base_plate/JankyCam Case Base v0.12.stl b/case/base_plate/JankyCam Case Base v0.12.stl new file mode 100644 index 00000000..68494627 Binary files /dev/null and b/case/base_plate/JankyCam Case Base v0.12.stl differ diff --git a/case/base_plate/JankyCam Case Base v0.13.stl b/case/base_plate/JankyCam Case Base v0.13.stl new file mode 100644 index 00000000..e9d2c72b Binary files /dev/null and b/case/base_plate/JankyCam Case Base v0.13.stl differ diff --git a/case/base_plate/JankyCam Case Base v0.3.stl b/case/base_plate/JankyCam Case Base v0.3.stl new file mode 100644 index 00000000..f4c52190 Binary files /dev/null and b/case/base_plate/JankyCam Case Base v0.3.stl differ diff --git a/case/base_plate/JankyCam Case Base v0.4.stl b/case/base_plate/JankyCam Case Base v0.4.stl new file mode 100644 index 00000000..8fe90cce Binary files /dev/null and b/case/base_plate/JankyCam Case Base v0.4.stl differ diff --git a/case/base_plate/JankyCam Case Base v0.5.stl b/case/base_plate/JankyCam Case Base v0.5.stl new file mode 100644 index 00000000..37c4269d Binary files /dev/null and 
b/case/base_plate/JankyCam Case Base v0.5.stl differ diff --git a/case/base_plate/JankyCam Case Base v0.6.stl b/case/base_plate/JankyCam Case Base v0.6.stl new file mode 100644 index 00000000..8b4de74a Binary files /dev/null and b/case/base_plate/JankyCam Case Base v0.6.stl differ diff --git a/case/base_plate/JankyCam Case Base v0.7.stl b/case/base_plate/JankyCam Case Base v0.7.stl new file mode 100644 index 00000000..eb596b8e Binary files /dev/null and b/case/base_plate/JankyCam Case Base v0.7.stl differ diff --git a/case/base_plate/JankyCam Case Base v0.8.stl b/case/base_plate/JankyCam Case Base v0.8.stl new file mode 100644 index 00000000..0b50ea2b Binary files /dev/null and b/case/base_plate/JankyCam Case Base v0.8.stl differ diff --git a/case/base_plate/JankyCam Case Base v0.9.stl b/case/base_plate/JankyCam Case Base v0.9.stl new file mode 100644 index 00000000..159fbd29 Binary files /dev/null and b/case/base_plate/JankyCam Case Base v0.9.stl differ diff --git a/case/base_plate/JankyCam Case v0.1.stl b/case/base_plate/JankyCam Case v0.1.stl new file mode 100644 index 00000000..f1a62343 Binary files /dev/null and b/case/base_plate/JankyCam Case v0.1.stl differ diff --git a/case/base_plate/JankyCam Case v0.2.stl b/case/base_plate/JankyCam Case v0.2.stl new file mode 100644 index 00000000..10923109 Binary files /dev/null and b/case/base_plate/JankyCam Case v0.2.stl differ diff --git a/case/base_plate/JankyCam Case v0.stl b/case/base_plate/JankyCam Case v0.stl new file mode 100644 index 00000000..693b6878 Binary files /dev/null and b/case/base_plate/JankyCam Case v0.stl differ diff --git a/case/base_plate/version 9 project.3mf b/case/base_plate/version 9 project.3mf new file mode 100644 index 00000000..798c1f82 Binary files /dev/null and b/case/base_plate/version 9 project.3mf differ diff --git a/src/camera_handler.py b/src/camera_handler.py index d17a33bf..ab295579 100644 --- a/src/camera_handler.py +++ b/src/camera_handler.py @@ -79,6 +79,7 @@ def 
start_camera(original_config, skip_auto=False, skip_button_listen=False): camera.resolution = (screen_w, screen_h) camera.framerate = screen_fps # fps + camera.image_denoise = False # TODO: Make this configurable? overlay = overlay_handler.add_overlay(camera, overlay, config) overlay_handler.display_text(camera, '', config) @@ -150,6 +151,9 @@ def button_callback_4(): else: if config["hdr"]: take_hdr_shot(camera, overlay, config) + elif not config["hdr"] and config["continuous_shot"]: + for i in range(config["continuous_shot_count"]): + take_single_shot(camera, overlay, config) else: take_single_shot(camera, overlay, config) @@ -221,8 +225,16 @@ def auto_mode(camera, overlay, config, skip_dpc=False): adjust_fom(camera, config) set_fom(camera, config) + set_hdr2(camera, config) + overlay_handler.display_text(camera, '', config) +def set_overlay(camera, overlay, config): + if config["show_overlay"] == False: + overlay_handler.display_text(camera, '', config) + if config["show_overlay"] == True: + overlay_handler.hide_overlay(camera, config) + def adjust_exposure_mode(camera, config): idex = config["available_exposure_modes"].index(config["exposure_mode"]) + 1 @@ -313,6 +325,18 @@ def compute_framerate(camera, config): return framerate +def adjust_effect(camera, config): + idex = config["available_camera_effects"].index(config["selected_camera_effect"]) + 1 + + if idex < len(config["available_camera_effects"]): + config["selected_camera_effect"] = config["available_camera_effects"][idex] + else: + config["selected_camera_effect"] = config["default_camera_effect"] + + camera.image_effect = config["selected_camera_effect"] + overlay_handler.display_text(camera, '', config) + print(f'image_effect: {config["selected_camera_effect"]}') + def adjust_awb_mode(camera, config): idex = config["available_awb_mode"].index(config["awb_mode"]) + 1 @@ -400,6 +424,10 @@ def set_hdr2(camera, config): mmal_handler.set_mmal_parameter(camera, parameter, value) print(f'hdr2: 
{config["hdr2"]}') +def adjust_shot(camera, config): + config["continuous_shot"] = not config["continuous_shot"] + overlay_handler.display_text(camera, '', config) + def zoom(camera, config): current_zoom = camera.zoom print(f'current_zoom: {current_zoom}') @@ -503,8 +531,13 @@ def take_single_shot(camera, overlay, config): filecount = len(existing_files) frame_count = filecount - raw_filename = f'{dcim_images_path_raw}/{frame_count}.dng' - original_filename = f'{dcim_original_images_path}/{frame_count}.{format}' + if config["continuous_shot"] == True: + raw_filename = f'{dcim_images_path_raw}/{frame_count}_continuous.dng' + original_filename = f'{dcim_original_images_path}/{frame_count}_continuous.{format}' + else: + raw_filename = f'{dcim_images_path_raw}/{frame_count}.dng' + original_filename = f'{dcim_original_images_path}/{frame_count}.{format}' + print(original_filename) stream = BytesIO() @@ -543,31 +576,35 @@ def take_single_shot(camera, overlay, config): camera.shutter_speed = 0 def trigger_video(camera, overlay, config): - if config["recording"]: - camera.stop_recording() - config["recording"] = False - else: - screen_w = config["screen_w"] - screen_h = config["screen_h"] + screen_w = config["screen_w"] + screen_h = config["screen_h"] + + width = config["video_width"] + height = config["video_height"] + + dcim_videos_path = config["dcim_videos_path"] - width = config["width"] - height = config["height"] + format = config["video_format"] - dcim_videos_path = config["dcim_videos_path"] + existing_files = glob.glob(f'{dcim_videos_path}/*.{format}') + filecount = len(existing_files) + + original_filename = f'{dcim_videos_path}/{filecount}.{format}' + print(original_filename) + + camera.resolution = (width, height) + camera.framerate = config["recording_fps"] + print(f'screen: ({screen_w}, {screen_h}), res: ({width}, {height}), shutter_speed: {camera.shutter_speed}') - format = config["video_format"] + config["recording"] = True - existing_files = 
glob.glob(f'{dcim_videos_path}/*.{format}') - filecount = len(existing_files) + camera.start_recording(original_filename, format) - original_filename = f'{dcim_videos_path}/{filecount}.{format}' - print(original_filename) + time.sleep(config["recording_time"]) - camera.resolution = (width, height) - print(f'screen: ({screen_w}, {screen_h}), res: ({width}, {height}), shutter_speed: {camera.shutter_speed}') + camera.stop_recording() - config["recording"] = True - camera.start_recording(original_filename, format) + config["recording"] = False def write_via_thread(original_filename, write_type, stream): w = ThreadWriter(original_filename, write_type) diff --git a/src/main.py b/src/main.py index 1b6e8c7b..f1128d90 100644 --- a/src/main.py +++ b/src/main.py @@ -34,7 +34,18 @@ # width = 4056 # height = 3040 -VERSION = "0.0.35" +# TODO: Add libcamera support +# https://www.raspberrypi.com/documentation/accessories/camera.html#post-processing-with-opencv +# https://www.raspberrypi.com/documentation/accessories/camera.html#building-libcamera-and-libcamera-apps +# https://forums.raspberrypi.com/viewtopic.php?t=273018&sid=567d0c674da8d03e159dbd998d20ace3&start=125 + +# TODO: Brightness +# TODO: Contrast +# TODO: Saturation +# TODO: Higher fps view +# TODO: Fix overlay turn off to use existing methods + +VERSION = "0.0.40" # Modules import document_handler @@ -69,10 +80,11 @@ "screen_h": 960, # 768 #760 # 240 screen res # Needs to be 4:3 "overlay_w": 320, "overlay_h": 240, + "show_overlay": True, "width": 4056, # Image width "height": 3040, # Image height - "video_width": 4056, - "video_height": 3040, + "video_width": 1920, # 1920x1080 + "video_height": 1080, # 1920x1080 "annotate_text_size": 48, # 6 to 160, inclusive. 
The default is 32 "exposure_mode": 'auto', "default_exposure_mode": 'auto', @@ -96,25 +108,125 @@ "beach", "fireworks" ], + "selected_camera_effect": 'none', + "default_camera_effect": 'none', + "available_camera_effects": [ + "none", + "negative", + "solarize", + "sketch", + "denoise", + "emboss", + "oilpaint", + "hatch", + "gpen", + "pastel", + "watercolor", + "film", + "blur", + "saturation", + "colorswap", + "washedout", + "posterise", + "colorpoint", + "colorbalance", + "cartoon", + "deinterlace1", + "deinterlace2" + ], "available_isos": [0, 5, 10, 25, 50, 100, 200, 320, 400, 500, 640, 800, 1600], # 0 is auto / 3200, 6400 "iso": 5, #0, # 800 / should shift to 0 - auto "default_iso": 5, - "available_shutter_speeds": [0, 100, 500, 1000, 1500, 2000, 4000, 8000, 3000, 16667, 33333, 66667, 125000, 250000, 500000, 1000000], # 1/10000, 1/2000, 1/1000, ... - "available_long_shutter_speeds": [0, 1000000, 2000000, 3000000, 4000000, 5000000, 10000000, 15000000, 20000000, 25000000, 30000000, 35000000, 40000000, 200000000], + "available_shutter_speeds": [ + 0, + 100, + 500, + 1000, + 1500, + 2000, + 4000, + 8000, + 3000, + 16667, + 33333, + 66667, + 125000, + 250000, + 500000, + 1000000 + ], # 1/10000, 1/2000, 1/1000, ... 
+ "available_long_shutter_speeds": [ + 0, + 1000000, + 2000000, + 3000000, + 4000000, + 5000000, + 10000000, + 15000000, + 20000000, + 25000000, + 30000000, + 35000000, + 40000000, + 200000000 + ], "take_long_shutter_speed": False, "shutter_speed": 0, "long_shutter_speed": 0, "default_shutter_speed": 0, - "available_awb_mode": ['auto', 'off', 'sunlight', 'cloudy', 'shade', 'tungsten', 'fluorescent', 'incandescent', 'flash', 'horizon'], + "available_awb_mode": [ + 'auto', + 'off', + 'sunlight', + 'cloudy', + 'shade', + 'tungsten', + 'fluorescent', + 'incandescent', + 'flash', + 'horizon' + ], "awb_mode": 'auto', "default_awb_mode": 'auto', # "awb_gains": 0.0 - 8.0 (), "dpc": 0, # 0 - 3, default is 3 and 0 is disabled "default_dpc": 0, "raw_convert": True, "available_dpc_options": [0, 1, 2, 3], #https://www.raspberrypi.org/forums/viewtopic.php?f=43&t=277768 - "current_menu_items": ["auto", "shutter_speed", "iso", "hdr2", "delay_time", "long_shutter_speed", "sub_menu"], - "available_menu_items": ["auto", "shutter_speed", "iso", "hdr2", "delay_time", "long_shutter_speed", "sub_menu"], - "available_sub_menu_items": ["sub_menu", "exposure_mode", "awb_mode", "hdr", "video", "resolution", "encoding", "dpc - star eater", "raw_convert", "fom"], + "current_menu_items": [ + "auto", + "shutter_speed", + "iso", + "delay_time", + "long_shutter_speed", + "effect", + "overlay", + "sub_menu" + ], + "available_menu_items": [ + "auto", + "shutter_speed", + "iso", + "delay_time", + "long_shutter_speed", + "effect", + "overlay", + "sub_menu" + ], + "available_sub_menu_items": [ + "sub_menu", + "continuous_shot", + "exposure_mode", + "awb_mode", + "hdr", + "hdr2", + "video", + "resolution", + "encoding", + "dpc - star eater", + "raw_convert", + "fom" + ], "menu_item": "auto", "default_menu_item": "auto", "hdr": False, @@ -123,18 +235,22 @@ "default_fom": True, "fom_overlay_x_padding": 50, # in pixels "fom_overlay_y_padding": 50, # in pixels - "hdr2": False, + "hdr2": True, 
"preview_mode": "built-in", # "built-in" "continuous_shot" + "continuous_shot": False, + "continuous_shot_count": 15, "default_preview_mode": 'built-in', "video": False, "recording": False, + "recording_time": 30, # 30 secs + "recording_fps": 30, # 30 secs "encoding": False, # TODO "gpio": { "button_1": 27, "button_2": 23, "button_3": 22, "button_4": 17, - "bouncetime": 450 + "bouncetime": 500 } } diff --git a/src/menu_handler.py b/src/menu_handler.py index 5deb08a6..6e86e164 100644 --- a/src/menu_handler.py +++ b/src/menu_handler.py @@ -2,6 +2,10 @@ import camera_handler def select_menu_item(camera, config): + if config["show_overlay"] == False: + camera_handler.set_overlay(camera, overlay, config) + return True + idex = config["current_menu_items"].index(config["menu_item"]) + 1 if idex < len(config["current_menu_items"]): @@ -13,6 +17,10 @@ def select_menu_item(camera, config): print(f'menu_item: {config["menu_item"]}') def select_option(camera, overlay, config): + if config["show_overlay"] == False: + camera_handler.set_overlay(camera, overlay, config) + return True + if config["menu_item"] == "auto": camera_handler.auto_mode(camera, overlay, config) if config["menu_item"] == "exposure_mode": @@ -44,6 +52,12 @@ def select_option(camera, overlay, config): camera_handler.set_hdr2(camera, config) if config["menu_item"] == "delay_time": camera_handler.adjust_delay(camera, config) + if config["menu_item"] == "continuous_shot": + camera_handler.adjust_shot(camera, config) + if config["menu_item"] == "effect": + camera_handler.adjust_effect(camera, config) + if config["menu_item"] == "overlay": + camera_handler.set_overlay(camera, overlay, config) if config["menu_item"] == "sub_menu": handle_sub_menu(config) @@ -51,6 +65,6 @@ def handle_sub_menu(config): if (config["current_menu_items"] == config["available_menu_items"]): config["current_menu_items"] = config["available_sub_menu_items"] else: - config["current_menu_items"] = config["available_items"] + 
config["current_menu_items"] = config["available_menu_items"] config["default_menu_item"] = config["current_menu_items"][0] diff --git a/src/overlay_handler.py b/src/overlay_handler.py index 1c271aaa..d8bfae82 100644 --- a/src/overlay_handler.py +++ b/src/overlay_handler.py @@ -21,10 +21,15 @@ def compute_shutter_speed_from_us(us): else: return f'1/{converted_seconds} ({us} us.)' +def hide_overlay(camera, config): + camera.annotate_text = "" + def display_text(camera, text, config): # camera.annotate_text = f'{camera.annotate_text} - {camera.exposure_mode}' if config["video"]: - mode = "Video Mode" + mode = f'Video Mode ({config["recording_time"]} secs)' + elif config["continuous_shot"] == True: + mode = f'Photo Mode Continuous ({config["continuous_shot_count"]} frames)' else: mode = "Photo Mode" @@ -47,8 +52,10 @@ def display_text(camera, text, config): framerate = camera.framerate + effect_text = f'Effect: {camera.image_effect}' + boolean_text = f'hdr: {config["hdr"]}; hdr2: {config["hdr2"]}, raw: {config["raw_convert"]}, dpc: {config["dpc"]}' - output_text = f'{mode} - fps: {framerate} {config["set_zoom"]}\n{camera_settings}\n{boolean_text}\n{selected_item}\n{shutter_text}\n{text}' + output_text = f'{mode} - fps: {framerate} {config["set_zoom"]}\n{camera_settings}\n{boolean_text}\n{selected_item}\n{shutter_text}\n{effect_text}\n{text}' camera.annotate_text_size = config["annotate_text_size"] camera.annotate_text = output_text diff --git a/tools/image_compare.py b/tools/image_compare.py new file mode 100644 index 00000000..484f18bd --- /dev/null +++ b/tools/image_compare.py @@ -0,0 +1,39 @@ +import sys +sys.path.insert(1, '../src/') +sys.path.insert(1, 'src/') + +import os +import glob +import math + +import cv2 +import numpy as np + +import rawpy +from PIL import Image + +# Modules +import document_handler + +# TODO: +# Blur Combine +# Contrast equalisation / compensation +# Progress bar +# EXIF Copy + +raw_file_path = '/mnt/g/tmp/812 Waxing Gibbons/raw/812.dng' 
+save_path = '/mnt/g/tmp/812 Waxing Gibbons/raw/' +frames_save_path = f'{save_path}/frames' + +print(save_path) +document_handler.detect_or_create_folder(frames_save_path) + +output_filetype = '.jpg' + +save_frames = True +sharpen = False +normalise = True +denoise = False + +gamma = 2.4 +bit_depth = 24 diff --git a/tools/laplacian_sharpen.py b/tools/laplacian_sharpen.py new file mode 100644 index 00000000..484f18bd --- /dev/null +++ b/tools/laplacian_sharpen.py @@ -0,0 +1,39 @@ +import sys +sys.path.insert(1, '../src/') +sys.path.insert(1, 'src/') + +import os +import glob +import math + +import cv2 +import numpy as np + +import rawpy +from PIL import Image + +# Modules +import document_handler + +# TODO: +# Blur Combine +# Contrast equalisation / compensation +# Progress bar +# EXIF Copy + +raw_file_path = '/mnt/g/tmp/812 Waxing Gibbons/raw/812.dng' +save_path = '/mnt/g/tmp/812 Waxing Gibbons/raw/' +frames_save_path = f'{save_path}/frames' + +print(save_path) +document_handler.detect_or_create_folder(frames_save_path) + +output_filetype = '.jpg' + +save_frames = True +sharpen = False +normalise = True +denoise = False + +gamma = 2.4 +bit_depth = 24 diff --git a/tools/process_hdr_from_raw.py b/tools/process_hdr_from_raw.py index eaa9ca93..3b5a4b80 100644 --- a/tools/process_hdr_from_raw.py +++ b/tools/process_hdr_from_raw.py @@ -26,12 +26,14 @@ # raw_file_path = '/mnt/g/tmp/749 Waning Gibbons/raw/749.dng' # raw_file_path = '/mnt/g/tmp/761 Waning Gibbons/raw/761.dng' # raw_file_path = '/mnt/g/tmp/773 Waxing Gibbons/raw/773.dng' -raw_file_path = '/mnt/g/tmp/784 Waxing Gibbons/raw/784.dng' +# raw_file_path = '/mnt/g/tmp/784 Waxing Gibbons/raw/784.dng' +raw_file_path = '/mnt/g/tmp/812 Waxing Gibbons/raw/812.dng' # save_path = '/mnt/g/tmp/725 Half Moon/raw/output' # save_path = '/mnt/g/tmp/749 Waning Gibbons/raw/' # save_path = '/mnt/g/tmp/761 Waning Gibbons/raw/' # save_path = '/mnt/g/tmp/773 Waxing Gibbons/raw/' -save_path = '/mnt/g/tmp/784 Waxing Gibbons/raw/' +# 
save_path = '/mnt/g/tmp/784 Waxing Gibbons/raw/' +save_path = '/mnt/g/tmp/812 Waxing Gibbons/raw/' frames_save_path = f'{save_path}/frames' print(save_path) diff --git a/tools/process_raw_output.py b/tools/process_raw_output.py index f0538eaa..7e98e9c0 100644 --- a/tools/process_raw_output.py +++ b/tools/process_raw_output.py @@ -19,9 +19,9 @@ # Constants # original_files_path = "/mnt/g/tmp/original" -original_files_path = "/mnt/g/tmp/784 Waxing Gibbons/raw/original" +original_files_path = "/data/photography/continious_shot/3" # raw_file_save_path = "/mnt/g/tmp/raw" -raw_file_save_path = "/mnt/g/tmp/784 Waxing Gibbons/raw/" +raw_file_save_path = "/data/photography/continious_shot/3/raw/" filetype = '.dng' # TODO: List them all @@ -31,11 +31,11 @@ # colour_profile_path = "../Colour_Profiles/imx477/PyDNG_profile" config = { - "neutral_colour_profile": "../Colour_Profiles/imx477/Raspberry Pi High Quality Camera Lumariver 2860k-5960k Neutral Look.json", + "neutral_colour_profile": "/home/trex22/development/Colour_Profiles/imx477/Raspberry Pi High Quality Camera Lumariver 2860k-5960k Neutral Look.json", "neutral_colour_profile_name": "neutral_colour", - "skin_tone_colour_profile": "../Colour_Profiles/imx477/Raspberry Pi High Quality Camera Lumariver 2860k-5960k Skin+Sky Look.json", + "skin_tone_colour_profile": "/home/trex22/development/Colour_Profiles/imx477/Raspberry Pi High Quality Camera Lumariver 2860k-5960k Skin+Sky Look.json", "skin_tone_colour_profile_name": "skin_tone", - "pydng_colour_profile": "../Colour_Profiles/imx477/PyDNG_profile.json", + "pydng_colour_profile": "/home/trex22/development/Colour_Profiles/imx477/PyDNG_profile.json", "pydng_colour_profile_name": "pydng", "selected_colour_profile": "neutral_colour_profile" #"all" # can be all or neutral_colour_profile, skin_tone_colour_profile, pydng_colour_profile ... 
others to be added later } diff --git a/tools/stack_fft_images_from_raw.py b/tools/stack_fft_images_from_raw.py new file mode 100644 index 00000000..b0626d83 --- /dev/null +++ b/tools/stack_fft_images_from_raw.py @@ -0,0 +1,321 @@ +# https://docs.opencv.org/4.5.3/de/dbc/tutorial_py_fourier_transform.html +import sys +sys.path.insert(1, '../src/') +sys.path.insert(1, 'src/') + +import os +import glob +import math + +import cv2 +import numpy as np +from scipy import fftpack +import matplotlib.pyplot as plt +import rawpy + +import platform +environment = platform.system().lower() +print(f'Environment detected: {environment}') + +import rawpy +from PIL import Image + +# Modules +import document_handler + +# TODO: +# Blur Combine +# Contrast equalisation / compensation +# Progress bar +# EXIF Copy +# Select Base Image + +if (environment == 'windows'): + stacked_file_paths = [ + "G:\\tmp\\continious_shot\\3\\885_continuous.jpeg", + "G:\\tmp\\continious_shot\\3\\886_continuous.jpeg", + "G:\\tmp\\continious_shot\\3\\887_continuous.jpeg", + "G:\\tmp\\continious_shot\\3\\888_continuous.jpeg", + "G:\\tmp\\continious_shot\\3\\889_continuous.jpeg", + "G:\\tmp\\continious_shot\\3\\890_continuous.jpeg", + "G:\\tmp\\continious_shot\\3\\891_continuous.jpeg", + "G:\\tmp\\continious_shot\\3\\892_continuous.jpeg", + "G:\\tmp\\continious_shot\\3\\893_continuous.jpeg", + "G:\\tmp\\continious_shot\\3\\894_continuous.jpeg" + ] + + # raw_file_path = '/run/media/trex22/Scratch Disk/tmp/812 Waxing Gibbons/raw/812.dng' + + save_path = 'G:\\tmp\\continious_shot\\3\\output\\' + frames_save_path = f'{save_path}\\frames' +else: + stacked_file_paths = [ + "/data/photography/continious_shot/3/885_continuous.jpeg", + "/data/photography/continious_shot/3/886_continuous.jpeg", + "/data/photography/continious_shot/3/887_continuous.jpeg", + "/data/photography/continious_shot/3/888_continuous.jpeg", + "/data/photography/continious_shot/3/889_continuous.jpeg", + 
"/data/photography/continious_shot/3/890_continuous.jpeg", + "/data/photography/continious_shot/3/891_continuous.jpeg", + "/data/photography/continious_shot/3/892_continuous.jpeg", + "/data/photography/continious_shot/3/893_continuous.jpeg", + "/data/photography/continious_shot/3/894_continuous.jpeg" + ] + + # stacked_file_paths = [ + # "/data/photography/continious_shot/3/raw/885_continuous.dng", + # "/data/photography/continious_shot/3/raw/886_continuous.dng", + # "/data/photography/continious_shot/3/raw/887_continuous.dng", + # "/data/photography/continious_shot/3/raw/888_continuous.dng", + # "/data/photography/continious_shot/3/raw/889_continuous.dng", + # "/data/photography/continious_shot/3/raw/890_continuous.dng", + # "/data/photography/continious_shot/3/raw/891_continuous.dng", + # "/data/photography/continious_shot/3/raw/892_continuous.dng", + # "/data/photography/continious_shot/3/raw/893_continuous.dng", + # "/data/photography/continious_shot/3/raw/894_continuous.dng" + # ] + + # raw_file_path = '/run/media/trex22/Scratch Disk/tmp/812 Waxing Gibbons/raw/812.dng' + + save_path = '/run/media/trex22/Scratch Disk/tmp/continious_shot/3/output/' + frames_save_path = f'{save_path}/frames' + +print('Compute fft avg from a stack of images') +print(f'Save path: {save_path}') +# document_handler.detect_or_create_folder(frames_save_path) + +output_filetype = '.png' + +save_frames = True +sharpen = False +normalise = True +denoise = False + +gamma = 2.4 +bit_depth = 24 + +# https://stackoverflow.com/questions/38476359/fft-on-image-with-python +# http://scipy-lectures.org/intro/scipy/auto_examples/solutions/plot_fft_image_denoise.html +def filter(fft, keep_fraction = 0.1): + im_fft2 = fft.copy() + + # Set r and c to be the number of rows and columns of the array. 
+ r, c = im_fft2.shape + + # Set to zero all rows with indices between r*keep_fraction and + # r*(1-keep_fraction): + im_fft2[int(r*keep_fraction):int(r*(1-keep_fraction))] = 0 + + # Similarly with the columns: + im_fft2[:, int(c*keep_fraction):int(c*(1-keep_fraction))] = 0 + + return im_fft2 + +def fft(channel): + # fft = fftpack.fft2(channel) + fft = np.fft.fft2(channel) + # fft = np.fft.fftn(channel) # whole image + # fft = np.fft.fftn(channel[...,::-1]) + # fft = np.fft.fftshift(channel) + # fft *= 255.0 / fft.max() # proper scaling into 0..255 range + # return np.absolute(fft) + return fft + +def ifft(channel): + # ifft = np.fft.ifftshift(channel) + # ifftc *= 255.0 / ifft.max() + # ifft = np.fft.ifft2(ifft) + + # ifft = fftpack.ifft2(channel).real + # channel_shift = channel * (255.0 / channel.max()) + ifft = np.fft.ifft2(channel) + # ifft = np.fft.ifftn(channel_shift) + # ifft = np.fft.ifftn(channel) + # ifft *= 255.0 / ifft.max() # proper scaling back to 0..255 range + + # return np.absolute(ifft) + return ifft + +def channel_shift(channel): + return channel * (255.0 / channel.max()) + +def filter_fft(frame, keep_fraction = 0.1): + # ifftshift, fft2, fft + # f_ishift = np.fft.ifftshift(frame) + # return f_ishift + + channels = cv2.split(frame.real) + result_array = np.zeros_like(frame.real) + + if frame.shape[2] > 1: # grayscale images have only one channel + for i, channel in enumerate(channels): + result_array[..., i] = filter(channel, keep_fraction) + else: + result_array[...] 
= filter(channels[0], keep_fraction) + + return np.array(result_array) # Image.fromarray(result_array) + +def to_fft(image): + # ifftshift, fft2, fft + # f_ishift = np.fft.ifftshift(frame) + # return f_ishift + + # channels = cv2.split(frame) + # result_array = np.zeros_like(frame, dtype=float) + + # if frame.shape[2] > 1: # grayscale images have only one channel + # for i, channel in enumerate(channels): + # ichan = np.fft.fft2(channel) + # # ichan *= 255.0 / ichan.max() + # # result_array[..., i] = ichan + # result_array[..., i] = np.fft.fftshift(ichan) + + # # result_array = np.fft.fft2(frame) + # else: + # result_array[...] = np.fft.fft2(channels[0]) + + # return np.array(result_array) # Image.fromarray(result_array) + rgb_fft = np.zeros_like(image, dtype=float) + + for i in range(3): + rgb_fft[..., i] = np.fft.fftshift(np.fft.fft2((image[:, :, i]))) + + return np.array(rgb_fft) + +def from_fft(frame): + # img_back = np.fft.ifft2(f_ishift) + # return img_back + # channels = np.split(frame, 3, axis=2) + # result_array = np.zeros_like(frame, dtype=float) + + # if frame.shape[2] > 1: # grayscale images have only one channel + # for i, channel in enumerate(channels): + # ichan = np.fft.fft2(channel[:,:,0]) + # # ichan *= 255.0 / ichan.max() + # result_array[..., i] = np.abs(np.fft.ifft2(ichan)) + + # result_array[..., 0] = np.fft.ifft2(frame[:,:,0]).real + # result_array[..., 1] = np.fft.ifft2(frame[:,:,1]).real + # result_array[..., 2] = np.fft.ifft2(frame[:,:,2]).real + + # result_array = np.absolute(np.fft.ifftn(frame)) + # result_array = np.fft.ifftn(frame) #.real + # else: + # result_array[...] 
= np.fft.ifft2(channels[0]) + # result_array = np.zeros_like(frame, dtype=float) + # result_array[..., 0] = abs(np.fft.ifft2(frame[:,:,0])) + # result_array[..., 1] = abs(np.fft.ifft2(frame[:,:,1])) + # result_array[..., 2] = abs(np.fft.ifft2(frame[:,:,2])) + + # return np.array(result_array) # Image.fromarray(result_array) + + transformed_channels = [ + abs(np.fft.ifft2(frame[0])), + abs(np.fft.ifft2(frame[1])), + abs(np.fft.ifft2(frame[2])) + ] + return np.dstack([transformed_channels[0].astype(int), + transformed_channels[1].astype(int), + transformed_channels[2].astype(int)]) + +# https://stackoverflow.com/questions/43626200/numpy-mean-of-complex-numbers-with-infinities#43626307 +# https://stackoverflow.com/questions/43626200/numpy-mean-of-complex-numbers-with-infinities +def avg_tensor(tensor_stack): + # return np.mean(tensor_stack, axis=0) + return np.mean(tensor_stack[np.isfinite(tensor_stack)], axis=0) + +def save(frame, name): + cv2.imwrite(f'{save_path}{name}{output_filetype}', frame) + +def plot(image, caption='Original image'): + # im = plt.imread('../../../../data/moonlanding.png').astype(float) + + plt.figure() + # plt.imshow(image, plt.cm.gray) + # plt.imshow(image[...,::-1]) # cv2.cvtColor(lena, cv2.COLOR_BGR2RGB) + plt.imshow(image) + plt.title(caption) + plt.show() + +def save_stack(stack, name): + index = 0 + + for frame in stack: + save(frame, f'{name}_{index}') + index += 1 + +def generate_fft_stack(images): + fft_stack = [] + + for image in images: + fft_stack.append(to_fft(image)) + + return np.array(fft_stack) + +def open_file(path): + return cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB) + # return rawpy.imread(path).raw_image + +def open_files(stacked_file_paths): + list_of_images = [] + + for raw_file_path in stacked_file_paths: + # raw = rawpy.imread(raw_file_path) + # im = raw.raw_image + list_of_images.append(open_file(raw_file_path)) + + return np.array(list_of_images) + +# def find_significance(fft_frames): +# stacked_file_paths + 
+# https://www.pythonpool.com/numpy-ifft/ +# https://towardsdatascience.com/image-processing-with-python-application-of-fourier-transformation-5a8584dc175b +def fourier_transform_rgb(image): + f_size = 25 + transformed_channels = [] + for i in range(3): + rgb_fft = np.fft.fftshift(np.fft.fft2((image[:, :, i]))) + rgb_fft[:225, 235:237] = 1 + rgb_fft[-225:,235:237] = 1 + transformed_channels.append(abs(np.fft.ifft2(rgb_fft))) + + final_image = np.dstack([transformed_channels[0].astype(int), + transformed_channels[1].astype(int), + transformed_channels[2].astype(int)]) + + fig, ax = plt.subplots(1, 2, figsize=(17,12)) + ax[0].imshow(image) + ax[0].set_title('Original Image', fontsize = f_size) + ax[0].set_axis_off() + + ax[1].imshow(final_image) + ax[1].set_title('Transformed Image', fontsize = f_size) + ax[1].set_axis_off() + + fig.tight_layout() + plt.show() + +base_images = open_files(stacked_file_paths) +print(f'Number of files: {len(base_images)}') + +# base_image = base_images[0] +# fourier_transform_rgb(base_image) + +print("Generate FFT Stack") +fft_stack = generate_fft_stack(base_images) + +print('Compute AVG Tensor') +avg_fft = avg_tensor(fft_stack) +# plot(avg_fft.real, 'AVG FFT') + +print('Filter AVG Tensor') +# filtered_base_image = filter_fft(avg_fft, keep_fraction = 0.2) +# plot(filtered_base_image, 'FILTER AVG FFT') + +print('Convert back to image') +avg_image = from_fft(avg_fft) +# save(avg_image, f'avg_fft_converted') +plot(avg_image.real, 'AVG_IMAGE') + +print('Complete!') diff --git a/tools/stack_fft_images_generate_red.py b/tools/stack_fft_images_generate_red.py new file mode 100644 index 00000000..3f6f853b --- /dev/null +++ b/tools/stack_fft_images_generate_red.py @@ -0,0 +1,257 @@ +# Generates interesting red effect + +# https://docs.opencv.org/4.5.3/de/dbc/tutorial_py_fourier_transform.html +import sys +sys.path.insert(1, '../src/') +sys.path.insert(1, 'src/') + +import os +import glob +import math + +import cv2 +import numpy as np +from scipy 
import fftpack +import matplotlib.pyplot as plt +import rawpy + +import platform +environment = platform.system().lower() +print(f'Environment detected: {environment}') + +import rawpy +from PIL import Image + +# Modules +import document_handler + +# TODO: +# Blur Combine +# Contrast equalisation / compensation +# Progress bar +# EXIF Copy +# Select Base Image + +if (environment == 'windows'): + stacked_file_paths = [ + "G:\\tmp\\continious_shot\\3\\885_continuous.jpeg", + "G:\\tmp\\continious_shot\\3\\886_continuous.jpeg", + "G:\\tmp\\continious_shot\\3\\887_continuous.jpeg", + "G:\\tmp\\continious_shot\\3\\888_continuous.jpeg", + "G:\\tmp\\continious_shot\\3\\889_continuous.jpeg", + "G:\\tmp\\continious_shot\\3\\890_continuous.jpeg", + "G:\\tmp\\continious_shot\\3\\891_continuous.jpeg", + "G:\\tmp\\continious_shot\\3\\892_continuous.jpeg", + "G:\\tmp\\continious_shot\\3\\893_continuous.jpeg", + "G:\\tmp\\continious_shot\\3\\894_continuous.jpeg" + ] + + # raw_file_path = '/run/media/trex22/Scratch Disk/tmp/812 Waxing Gibbons/raw/812.dng' + + save_path = 'G:\\tmp\\continious_shot\\3\\output\\' + frames_save_path = f'{save_path}\\frames' +else: + stacked_file_paths = [ + "/data/photography/continious_shot/3/885_continuous.jpeg", + "/data/photography/continious_shot/3/886_continuous.jpeg", + "/data/photography/continious_shot/3/887_continuous.jpeg", + "/data/photography/continious_shot/3/888_continuous.jpeg", + "/data/photography/continious_shot/3/889_continuous.jpeg", + "/data/photography/continious_shot/3/890_continuous.jpeg", + "/data/photography/continious_shot/3/891_continuous.jpeg", + "/data/photography/continious_shot/3/892_continuous.jpeg", + "/data/photography/continious_shot/3/893_continuous.jpeg", + "/data/photography/continious_shot/3/894_continuous.jpeg" + ] + + # stacked_file_paths = [ + # "/data/photography/continious_shot/3/raw/885_continuous.dng", + # "/data/photography/continious_shot/3/raw/886_continuous.dng", + # 
"/data/photography/continious_shot/3/raw/887_continuous.dng", + # "/data/photography/continious_shot/3/raw/888_continuous.dng", + # "/data/photography/continious_shot/3/raw/889_continuous.dng", + # "/data/photography/continious_shot/3/raw/890_continuous.dng", + # "/data/photography/continious_shot/3/raw/891_continuous.dng", + # "/data/photography/continious_shot/3/raw/892_continuous.dng", + # "/data/photography/continious_shot/3/raw/893_continuous.dng", + # "/data/photography/continious_shot/3/raw/894_continuous.dng" + # ] + + # raw_file_path = '/run/media/trex22/Scratch Disk/tmp/812 Waxing Gibbons/raw/812.dng' + + save_path = '/run/media/trex22/Scratch Disk/tmp/continious_shot/3/output/' + frames_save_path = f'{save_path}/frames' + +print('Compute fft avg from a stack of images') +print(f'Save path: {save_path}') +# document_handler.detect_or_create_folder(frames_save_path) + +output_filetype = '.png' + +save_frames = True +sharpen = False +normalise = True +denoise = False + +gamma = 2.4 +bit_depth = 24 + +# https://stackoverflow.com/questions/38476359/fft-on-image-with-python +# http://scipy-lectures.org/intro/scipy/auto_examples/solutions/plot_fft_image_denoise.html +def filter(fft, keep_fraction = 0.1): + im_fft2 = fft.copy() + + # Set r and c to be the number of rows and columns of the array. 
# https://stackoverflow.com/questions/38476359/fft-on-image-with-python
# http://scipy-lectures.org/intro/scipy/auto_examples/solutions/plot_fft_image_denoise.html
def filter(fft, keep_fraction = 0.1):
    """Low-pass a 2-D spectrum by zeroing its middle rows and columns.

    NOTE: the name shadows builtins.filter and the parameter shadows the
    sibling `fft` function — both kept for backward compatibility.
    Returns a filtered copy; the input is not modified.
    """
    im_fft2 = fft.copy()
    r, c = im_fft2.shape
    # Zero all rows with indices between r*keep_fraction and r*(1-keep_fraction).
    im_fft2[int(r * keep_fraction):int(r * (1 - keep_fraction))] = 0
    # Similarly for the columns.
    im_fft2[:, int(c * keep_fraction):int(c * (1 - keep_fraction))] = 0
    return im_fft2


def fft(channel):
    """2-D forward FFT of a single channel (complex output)."""
    return np.fft.fft2(channel)


def ifft(channel):
    """2-D inverse FFT; complex result — callers take .real or abs()."""
    return np.fft.ifft2(channel)


def channel_shift(channel):
    # Rescale so the maximum maps to 255.0
    # (assumes channel.max() > 0 — TODO confirm with callers).
    return channel * (255.0 / channel.max())


def filter_fft(frame, keep_fraction = 0.1):
    """Apply `filter` to the real part of each channel of a spectrum.

    Accepts (H, W, C) or plain (H, W) input; returns a float array of the
    same shape (only the real part is filtered, as in the original).
    """
    real = frame.real
    result_array = np.zeros_like(real)
    if frame.ndim == 3 and frame.shape[2] > 1:
        for i in range(frame.shape[2]):
            result_array[..., i] = filter(real[..., i], keep_fraction)
    else:
        # Single-channel: (H, W) directly, or squeeze a trailing axis of 1.
        single = real if real.ndim == 2 else real[..., 0]
        result_array[...] = filter(single, keep_fraction).reshape(result_array.shape)
    return result_array


def to_fft(frame):
    """Forward FFT of an image.

    Multi-channel input uses fftn over the WHOLE array — the channel axis is
    transformed too; kept to preserve the original behaviour.
    """
    if frame.ndim == 3 and frame.shape[2] > 1:
        return np.fft.fftn(frame)
    # Single channel. The original float buffer silently dropped the
    # imaginary part of the FFT (ComplexWarning) — use a complex buffer.
    single = frame if frame.ndim == 2 else frame[..., 0]
    result_array = np.zeros_like(frame, dtype=complex)
    result_array[...] = fft(single).reshape(result_array.shape)
    return result_array


def from_fft(frame):
    """Inverse FFT per channel; returns the real part as a float array."""
    if frame.ndim == 3 and frame.shape[2] > 1:
        result_array = np.zeros_like(frame, dtype=float)
        for i in range(frame.shape[2]):
            # Take .real explicitly: the original complex-into-float
            # assignment cast away the imaginary part with a ComplexWarning;
            # the values are identical, the warning is gone.
            result_array[..., i] = ifft(frame[..., i]).real
        return result_array
    single = frame if frame.ndim == 2 else frame[..., 0]
    result_array = np.zeros_like(frame, dtype=float)
    result_array[...] = ifft(single).real.reshape(result_array.shape)
    return result_array
def avg_tensor(tensor_stack):
    """Element-wise mean across the first axis of a stack of tensors."""
    return np.mean(tensor_stack, axis=0)


def save(frame, name):
    """Write `frame` to <save_path><name><output_filetype> via OpenCV."""
    cv2.imwrite(f'{save_path}{name}{output_filetype}', frame)


def plot(image, caption='Original image'):
    """Display `image` in a new matplotlib figure titled `caption`."""
    plt.figure()
    plt.imshow(image)
    plt.title(caption)
    plt.show()


def save_stack(stack, name):
    """Save every frame of `stack` as <name>_<index>."""
    for index, frame in enumerate(stack):
        save(frame, f'{name}_{index}')


def generate_fft_stack(images):
    """Forward-FFT every image (via to_fft) and stack the spectra."""
    return np.array([to_fft(image) for image in images])


def open_file(path):
    """Load an image and convert OpenCV's BGR channel order to RGB."""
    return cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)


def open_files(stacked_file_paths):
    """Load every path into a single (N, H, W, 3) RGB array."""
    return np.array([open_file(path) for path in stacked_file_paths])


if __name__ == "__main__":
    # Script driver — guarded so the module can be imported without side
    # effects (the original ran unconditionally at import time).
    base_images = open_files(stacked_file_paths)
    print(f'Number of files: {len(base_images)}')

    base_image = base_images[0]

    # Round-trip sanity check: forward then inverse FFT of the first image.
    base_fft = fft(base_image)
    plot(from_fft(to_fft(base_image)).real)

    print('Complete!')