-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtracking.py
More file actions
188 lines (155 loc) · 6.53 KB
/
tracking.py
File metadata and controls
188 lines (155 loc) · 6.53 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Mouse tracking module - Specialized for mouse center-of-mass detection and tracking
"""
import cv2
import numpy as np
from config import MAX_GAP_FRAMES
def track_mouse(video_path, start_frame, end_frame, transform_params, sample_rate=1):
    """
    Track mouse position from video.

    Args:
        video_path (str): Path to video file
        start_frame (int): Analysis start frame
        end_frame (int): Analysis end frame (inclusive)
        transform_params (dict): Transformation parameters. Required keys:
            'matrix', 'warp_size'. Optional keys: 'camera_params',
            'use_camera_undistortion', 'thresh_value', 'min_contour_area',
            'experiment_type'.
        sample_rate (int, optional): Process every Nth frame. Default is 1 (all frames)

    Returns:
        dict: Tracking results with keys 'positions' (list of (x, y); entries
            are (None, None) when detection failed for that frame),
            'positions_smooth' (the same list with failed frames removed),
            'frame_indices', 'area_history', 'sample_rate', 'experiment_type'.
    """
    # Get transformation parameters
    matrix = transform_params['matrix']
    warp_size = transform_params['warp_size']
    camera_params = transform_params.get('camera_params')
    use_camera_undistortion = transform_params.get('use_camera_undistortion', False)
    thresh_value = transform_params.get('thresh_value', 50)
    min_contour_area = transform_params.get('min_contour_area', 100)
    experiment_type = transform_params.get('experiment_type', '1')

    # Data structures for tracking
    positions = []
    area_history = []
    frame_indices = []

    # Initialize video capture
    cap = cv2.VideoCapture(video_path)

    # Background subtractor settings
    background_subtractor = cv2.createBackgroundSubtractorMOG2(
        history=500, varThreshold=50, detectShadows=True
    )

    # Position of previous frame
    prev_x, prev_y = None, None
    # Note: max_movement is forwarded to detect_mouse for interface
    # compatibility but is not used by the current detect_mouse implementation.
    max_movement = 50 * sample_rate

    # Defensive lower bound of 1 so the progress computation can never divide
    # by zero, even for a degenerate (empty) frame range.
    total_frames_to_process = max((end_frame - start_frame) // sample_rate + 1, 1)

    # try/finally guarantees the capture handle is released even if
    # preprocessing or detection raises (the original code leaked it on error).
    try:
        # Process sampled frames (end_frame inclusive)
        for i, frame_idx in enumerate(range(start_frame, end_frame + 1, sample_rate)):
            # Progress display (percentage)
            progress_percent = int((i / total_frames_to_process) * 100)
            print(f"Tracking progress: {progress_percent}% complete (Frame {frame_idx}/{end_frame})", end='\r')

            # Get frame (random-access seek; stop at end of stream / read error)
            cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
            ret, frame = cap.read()
            if not ret:
                break

            # Preprocessing (distortion correction and perspective transformation)
            processed_frame = preprocess_frame(
                frame, matrix, warp_size,
                use_camera_undistortion, camera_params
            )

            # Mouse detection processing
            x, y, area = detect_mouse(
                processed_frame,
                background_subtractor,
                thresh_value,
                min_contour_area,
                prev_x, prev_y,
                max_movement
            )

            # Save results
            positions.append((x, y))
            frame_indices.append(frame_idx)
            if area is not None:
                area_history.append(area)

            # Save current position for next frame
            prev_x, prev_y = x, y
    finally:
        cap.release()

    print("\nTracking progress: 100% complete - All frames processed ")

    # Create "clean" position list excluding None values
    positions_clean = [(x, y) for x, y in positions if x is not None and y is not None]
    valid_percent = len(positions_clean) / len(positions) * 100 if positions else 0
    print(f"Valid tracking data: {len(positions_clean)}/{len(positions)} frames ({valid_percent:.1f}%)")

    # Return results
    return {
        'positions': positions,
        'positions_smooth': positions_clean,
        'frame_indices': frame_indices,
        'area_history': area_history,
        'sample_rate': sample_rate,
        'experiment_type': experiment_type
    }
def preprocess_frame(frame, matrix, warp_size, use_undistortion=False, camera_params=None):
    """
    Run the per-frame preprocessing pipeline: optional lens-distortion
    correction followed by a perspective warp.

    Args:
        frame: Input video frame.
        matrix: Perspective transformation matrix.
        warp_size: Output size for the warped image.
        use_undistortion: When True (and camera_params is provided), apply
            cv2.undistort before the warp.
        camera_params: Dict providing 'camera_matrix', 'dist_coeffs' and
            'new_camera_matrix' for the undistortion step.

    Returns:
        The perspective-warped (and optionally undistorted) frame.
    """
    # Distortion correction runs only when both the flag is set and the
    # calibration data is actually available.
    if use_undistortion and camera_params:
        frame = cv2.undistort(
            frame,
            camera_params['camera_matrix'],
            camera_params['dist_coeffs'],
            None,
            camera_params['new_camera_matrix'],
        )
    # Apply the perspective transformation to the (possibly corrected) frame.
    return cv2.warpPerspective(frame, matrix, warp_size)
def detect_mouse(frame, background_subtractor, thresh_value, min_contour_area, prev_x=None, prev_y=None, max_movement=50):
    """
    Detect the mouse in a frame and return its center-of-mass coordinates.

    Combines a background-subtraction (motion) mask with a brightness-based
    dark-object mask, cleans the union morphologically, and takes the centroid
    of the largest sufficiently large contour.

    Args:
        frame: Frame to process.
        background_subtractor: Background subtractor instance.
        thresh_value: Binary threshold value for the dark-object mask.
        min_contour_area: Contours with area at or below this are ignored.
        prev_x, prev_y: Previous-frame position (accepted for interface
            compatibility; not used by this implementation).
        max_movement: Maximum allowable movement distance (accepted for
            interface compatibility; not used by this implementation).

    Returns:
        tuple: (x, y, area) with integer centroid coordinates, or
        (None, None, None) when no suitable contour exists, or
        (None, None, area) for a degenerate contour with zero moment m00.
    """
    # Motion cue: foreground mask, re-thresholded at 200 so that lower-valued
    # pixels (e.g. shadow labels from the subtractor) are discarded.
    motion_mask = background_subtractor.apply(frame)
    _, motion_mask = cv2.threshold(motion_mask, 200, 255, cv2.THRESH_BINARY)

    # Appearance cue: inverse brightness threshold — presumably the mouse is
    # darker than the arena background.
    grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    _, dark_mask = cv2.threshold(grayscale, thresh_value, 255, cv2.THRESH_BINARY_INV)

    # Union of both cues, then open + close with a 5x5 kernel to drop speckle
    # noise and fill small holes.
    kernel = np.ones((5, 5), np.uint8)
    merged = cv2.bitwise_or(dark_mask, motion_mask)
    merged = cv2.morphologyEx(merged, cv2.MORPH_OPEN, kernel)
    merged = cv2.morphologyEx(merged, cv2.MORPH_CLOSE, kernel)

    # Keep only contours large enough to plausibly be the mouse.
    contours, _ = cv2.findContours(merged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    candidates = [c for c in contours if cv2.contourArea(c) > min_contour_area]
    if not candidates:
        return None, None, None

    # The largest remaining contour is assumed to be the mouse body.
    body = max(candidates, key=cv2.contourArea)
    body_area = cv2.contourArea(body)

    # Centroid from image moments; m00 == 0 would divide by zero, so report
    # the area but no coordinates in that degenerate case.
    moments = cv2.moments(body)
    if moments["m00"] == 0:
        return None, None, body_area
    center_x = int(moments["m10"] / moments["m00"])
    center_y = int(moments["m01"] / moments["m00"])
    return center_x, center_y, body_area