diff --git a/example/unix_socket_client.py b/examples/unix_socket_client.py
similarity index 98%
rename from example/unix_socket_client.py
rename to examples/unix_socket_client.py
index 9b24684..5557aa3 100755
--- a/example/unix_socket_client.py
+++ b/examples/unix_socket_client.py
@@ -1,6 +1,4 @@
 import asyncio
-import socket
-import os
 
 class AsyncUnixSocketClient:
     def __init__(self, socket_path):
diff --git a/example/virtual_cam.py b/examples/virtual_cam.py
similarity index 69%
rename from example/virtual_cam.py
rename to examples/virtual_cam.py
index 7d6a49f..e1f1609 100644
--- a/example/virtual_cam.py
+++ b/examples/virtual_cam.py
@@ -1,22 +1,22 @@
 """
 Camera Stream to Virtual V4L2 Device
 ------------------------------------
-This script captures images from the Raspberry Pi camera and streams them 
+This script captures images from the Raspberry Pi camera and streams them
 to a virtual V4L2 loopback device using OpenCV.
 
 Usage:
 1. Install required dependencies:
    pip install opencv-python picamera2
-   
+
 2. Load v4l2loopback module (if not already loaded):
-   sudo modprobe v4l2loopback devices=1 video_nr=16 card_label=ProcessedCam max_buffers=4 exclusive_caps=1
-   
+   sudo modprobe v4l2loopback devices=1 video_nr=8 card_label=ProcessedCam max_buffers=4 exclusive_caps=1
+
 3. Run the script:
    python virtual_cam.py
 
 4. Test the video output:
-   /path/to/pi-webrtc --camera=v4l2:16 --width=1920 --height=1080 ... # View the processed feed by WebRTC
-   ffplay /dev/video16 # View the processed feed by ffplay
+   /path/to/pi-webrtc --camera=v4l2:8 --width=1920 --height=1080 ... # View the processed feed by WebRTC
+   ffplay /dev/video8 # View the processed feed by ffplay
 
 Requirements:
 - Raspberry Pi with Camera Module
@@ -29,6 +29,7 @@ import fcntl
 import v4l2
 import logging
+import argparse
 
 from picamera2 import Picamera2, MappedArray
 
 logging.basicConfig(
@@ -37,11 +38,11 @@
 
 
 class VirtualCameraStreamer:
-    def __init__(self, video_nr, camera_id=0, width=1920, height=1080):
+    def __init__(self, width, height, camera_id, virtual_camera):
         self.width = width
         self.height = height
         self.camera_id = camera_id
-        self.virtual_camera = f"/dev/video{video_nr}"
+        self.virtual_camera = virtual_camera
         self.fd = None
         self.picam2 = None
 
@@ -88,13 +89,17 @@ def _process_frame(self, request):
             self.stop()
 
     def start(self):
+        logging.info(f"Starting streamer with:")
+        logging.info(f" Resolution: {self.width}x{self.height}")
+        logging.info(f" Camera ID: {self.camera_id}")
+        logging.info(f" Output To Virtual Device: {self.virtual_camera}")
+
         if not self.fd:
             logging.error("Cannot start streaming without virtual device.")
             return
 
         self.picam2.pre_callback = self._process_frame
         self.picam2.start()
-        logging.info(f"Start streaming to {self.virtual_camera}...")
 
         try:
             while True:
@@ -113,5 +118,23 @@ def stop(self):
 
 
 if __name__ == "__main__":
-    streamer = VirtualCameraStreamer(16, camera_id=1)
+    parser = argparse.ArgumentParser(description="Start virtual camera streamer")
+    parser.add_argument("--width", type=int, default=1920, help="Frame width")
+    parser.add_argument("--height", type=int, default=1080, help="Frame height")
+    parser.add_argument("--camera-id", type=int, default=0, help="Camera input ID")
+    parser.add_argument(
+        "--virtual-device",
+        type=str,
+        default="/dev/video8",
+        help="Virtual video device path",
+    )
+    args = parser.parse_args()
+
+    streamer = VirtualCameraStreamer(
+        width=args.width,
+        height=args.height,
+        camera_id=args.camera_id,
+        virtual_camera=args.virtual_device,
+    )
+
     streamer.start()
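Note on the write path: the virtual_cam.py hunks above change how the loopback node is chosen, but the code that actually pushes frames into it is outside this diff. For orientation, the usual v4l2loopback output pattern with the fcntl and v4l2 modules imported above looks roughly like the sketch below; open_output_device, the RGB24 pixel format, and the per-frame write are illustrative assumptions, not the repository's exact implementation.

import fcntl
import os

import v4l2


def open_output_device(path, width, height):
    # Open the v4l2loopback node (e.g. /dev/video8) for writing.
    fd = os.open(path, os.O_RDWR)

    # Declare the format of the frames that will be written to the device.
    fmt = v4l2.v4l2_format()
    fmt.type = v4l2.V4L2_BUF_TYPE_VIDEO_OUTPUT
    fmt.fmt.pix.width = width
    fmt.fmt.pix.height = height
    fmt.fmt.pix.pixelformat = v4l2.V4L2_PIX_FMT_RGB24  # assumed; the script may negotiate another format
    fmt.fmt.pix.field = v4l2.V4L2_FIELD_NONE
    fmt.fmt.pix.bytesperline = width * 3
    fmt.fmt.pix.sizeimage = width * height * 3
    fcntl.ioctl(fd, v4l2.VIDIOC_S_FMT, fmt)
    return fd

# Per frame (a height x width x 3 uint8 array matching the negotiated format):
#     os.write(fd, frame.tobytes())

Readers such as pi-webrtc or ffplay then open that node as if it were a physical camera, which is what step 4 of the usage text exercises.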
diff --git a/example/yolo_cam.py b/examples/yolo_cam.py
similarity index 76%
rename from example/yolo_cam.py
rename to examples/yolo_cam.py
index 2693bdd..ffdaf80 100644
--- a/example/yolo_cam.py
+++ b/examples/yolo_cam.py
@@ -1,28 +1,29 @@
 """
 Camera Stream to Virtual V4L2 Device
 ------------------------------------
-This script captures images from the Raspberry Pi camera and streams them 
-to a virtual V4L2 loopback device using OpenCV. 
+This script captures images from the Raspberry Pi camera and streams them
+to a virtual V4L2 loopback device using OpenCV.
 
-It allows real-time object detection using YOLO while keeping the original 
+It allows real-time object detection using YOLO while keeping the original
 camera feed accessible without delay.
 
 Usage:
 1. Install required dependencies:
    pip install opencv-python ultralytics
-   
-2. Load v4l2loopback module:
-   sudo modprobe v4l2loopback devices=2 video_nr=16,17 card_label=RelayCam,YoloCam max_buffers=4 exclusive_caps=1,1
-   
+
+2. Reload v4l2loopback module:
+   sudo modprobe -r v4l2loopback
+   sudo modprobe v4l2loopback devices=2 video_nr=8,9 card_label=RelayCam,YoloCam max_buffers=4 exclusive_caps=1,1
+
 3. Start `virtual_cam.py` first:
-   python virtual_cam.py
-   
+   python ./examples/virtual_cam.py --width 1920 --height 1080 --camera-id 0 --virtual-device /dev/video8
+
 4. Run `yolo_cam.py` to apply YOLO detection:
-   python yolo_cam.py
+   python ./examples/yolo_cam.py --input-device /dev/video8 --output-device /dev/video9 --width 1920 --height 1080
 
 5. Test the video output:
-   /path/to/pi-webrtc --camera=v4l2:16 --width=1920 --height=1080 ... # View original camera feed
-   /path/to/pi-webrtc --camera=v4l2:17 --width=1920 --height=1080 ... # View YOLO-processed feed
+   /path/to/pi-webrtc --camera=v4l2:8 --width=1920 --height=1080 ... # View original camera feed
+   /path/to/pi-webrtc --camera=v4l2:9 --width=1920 --height=1080 ... # View YOLO-processed feed
 
 Requirements:
 - Raspberry Pi with Camera Module
@@ -36,6 +37,7 @@ import fcntl
 import v4l2
 import logging
+import argparse
 
 from ultralytics import YOLO
 
 model = YOLO("/home/pi/yolo11n.pt")
@@ -155,5 +157,31 @@ def stop(self):
 
 
 if __name__ == "__main__":
-    streamer = VirtualCameraStreamer("/dev/video16", "/dev/video17")
+    logging.basicConfig(level=logging.INFO)
+
+    parser = argparse.ArgumentParser(description="Virtual camera streamer")
+    parser.add_argument(
+        "--input-device",
+        type=str,
+        default="/dev/video8",
+        help="Input camera device path",
+    )
+    parser.add_argument(
+        "--output-device",
+        type=str,
+        default="/dev/video9",
+        help="Output virtual camera path",
+    )
+    parser.add_argument("--width", type=int, default=1920, help="Frame width")
+    parser.add_argument("--height", type=int, default=1080, help="Frame height")
+
+    args = parser.parse_args()
+
+    streamer = VirtualCameraStreamer(
+        input_device=args.input_device,
+        output_device=args.output_device,
+        width=args.width,
+        height=args.height,
+    )
+
     streamer.start()
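The relay loop at the heart of yolo_cam.py is likewise not shown in this diff. Going by the usage text, it reads the untouched feed from --input-device, runs YOLO on each frame, and writes the annotated result to --output-device. A rough sketch of that loop under those assumptions follows; relay, the cv2.VideoCapture wiring, and the BGR-to-RGB conversion are illustrative, not the file's actual code.

import os

import cv2


def relay(cap, out_fd, model):
    # Pull frames from the relay device, run YOLO, push annotated frames to the output device.
    while True:
        ok, frame = cap.read()                            # BGR frame from the input device
        if not ok:
            break
        results = model(frame, verbose=False)             # ultralytics inference on a single frame
        annotated = results[0].plot()                     # boxes and labels drawn on a copy of the frame
        rgb = cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)  # match an RGB24-configured loopback device
        os.write(out_fd, rgb.tobytes())

# Assumed wiring, mirroring the usage text above:
#     model  = YOLO("/home/pi/yolo11n.pt")                # ultralytics model, path taken from the hunk above
#     cap    = cv2.VideoCapture("/dev/video8", cv2.CAP_V4L2)
#     out_fd = a descriptor for /dev/video9, configured as in the earlier v4l2loopback sketch
#     relay(cap, out_fd, model)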