-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsre-screen-sherpa.py
More file actions
143 lines (121 loc) · 5.6 KB
/
sre-screen-sherpa.py
File metadata and controls
143 lines (121 loc) · 5.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
import cv2
import json
import time
import numpy as np
def parse_timestamp(timestamp):
    """Convert an "M:SS(.s)" timestamp string (e.g. "0:07.5") to seconds as a float."""
    minutes, _, seconds = timestamp.partition(':')
    return 60.0 * float(minutes) + float(seconds)
def timestamp_to_frame(timestamp, fps):
    """Map a "M:SS(.s)" timestamp onto a frame index at the given fps (truncated toward zero)."""
    total_seconds = parse_timestamp(timestamp)
    return int(total_seconds * fps)
def wrap_text(text, font, scale, thickness, max_width):
    """Greedily wrap *text* into lines whose rendered width fits in max_width pixels.

    Candidate lines are measured with cv2.getTextSize; a word that does not fit
    starts a new line. Returns the list of wrapped line strings.
    """
    lines = []
    pending = []
    for word in text.split():
        candidate = ' '.join(pending + [word])
        rendered_width = cv2.getTextSize(candidate, font, scale, thickness)[0][0]
        if rendered_width <= max_width:
            pending.append(word)
            continue
        # Word overflows: flush the current line (if any) and start fresh with it.
        if pending:
            lines.append(' '.join(pending))
        pending = [word]
    if pending:
        lines.append(' '.join(pending))
    return lines
# Load the annotation entries (name / priority / feedback / timestamp) from JSON.
with open('video-annotations.json', 'r') as f:
    graph_data = json.load(f)

# Open the source video and query its geometry and frame rate.
video_path = 'input-video.mp4'
cap = cv2.VideoCapture(video_path)
if not cap.isOpened():
    # Fix: the original silently produced an empty output on a bad path.
    raise IOError(f"Could not open video file: {video_path}")
# NOTE(review): truncating fps to int matches the original frame-number math,
# but drifts slightly on non-integer rates such as 29.97 fps — confirm intent.
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print(f"Video resolution: {width}x{height}, FPS: {fps}")

# Pre-compute the frame window during which each annotation is displayed.
for entry in graph_data['Graphs']:
    entry['frame_number'] = timestamp_to_frame(entry['timestamp'], fps)
    entry['end_frame'] = entry['frame_number'] + (5 * fps)  # Show for 5 seconds
    print(f"Entry: {entry['name']}, timestamp: {entry['timestamp']}, frame_number: {entry['frame_number']}, end_frame: {entry['end_frame']}")


def _draw_label(frame, text, font, scale, thickness, x, y):
    """Draw white *text* at (x, y) on a filled #8200f3 (BGR 243, 0, 130) box."""
    size = cv2.getTextSize(text, font, scale, thickness)[0]
    cv2.rectangle(frame, (x - 20, y - size[1] - 20),
                  (x + size[0] + 20, y + 40), (243, 0, 130), -1)
    cv2.putText(frame, text, (x, y), font, scale, (255, 255, 255),
                thickness, cv2.LINE_AA)


# Fix: open the writer up front and stream frames to it as they are processed.
# The original accumulated every frame in a Python list (`processed_frames`),
# holding the entire decoded video in RAM before writing it out.
# (Output filename typo "annoted" is preserved for compatibility with consumers.)
final_output_path = 'annoted_video.mp4'
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter(final_output_path, fourcc, fps, (width, height))

frame_count = 0
print("Processing video...")
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    frame_count += 1

    # Find the annotation (if any) whose display window covers this frame.
    current_entry = next(
        (e for e in graph_data['Graphs']
         if e['frame_number'] <= frame_count <= e['end_frame']),
        None,
    )

    if current_entry:
        font = cv2.FONT_HERSHEY_SIMPLEX

        # Priority badge: top-right, right-aligned with a 30px margin.
        priority_text = str(current_entry['priority'])
        priority_size = cv2.getTextSize(priority_text, font, 3.0, 8)[0]
        _draw_label(frame, priority_text, font, 3.0, 8,
                    width - priority_size[0] - 30, 120)

        # Graph name: top-left.
        _draw_label(frame, current_entry['name'], font, 3.0, 8, 30, 120)

        # Feedback: wrapped to 80% of the frame width, centered near the bottom.
        feedback_scale = 2.2
        feedback_thickness = 6
        feedback_spacing = 80
        wrapped_lines = wrap_text(current_entry['feedback'], font,
                                  feedback_scale, feedback_thickness,
                                  int(width * 0.8))
        start_y = height - 120 - len(wrapped_lines) * feedback_spacing
        for i, line in enumerate(wrapped_lines):
            line_size = cv2.getTextSize(line, font, feedback_scale,
                                        feedback_thickness)[0]
            _draw_label(frame, line, font, feedback_scale, feedback_thickness,
                        (width - line_size[0]) // 2,
                        start_y + i * feedback_spacing)

    out.write(frame)

    # Live preview; press 'q' to stop early (frames written so far are kept).
    cv2.imshow('Datadog Video Annotation', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()
print(f"Processing complete. Final video saved to {final_output_path}")