-
Notifications
You must be signed in to change notification settings - Fork 100
Expand file tree
/
Copy pathpublisher.py
More file actions
408 lines (336 loc) · 14.3 KB
/
publisher.py
File metadata and controls
408 lines (336 loc) · 14.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
#
# Apache v2 license
# Copyright (C) 2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import socket
import cv2
import pandas as pd
import paho.mqtt.client as mqtt
import time
import base64
import subprocess
import requests
import json
import os
import glob
from typing import Tuple, Optional
import logging
# MQTT broker hostname used for publishing weld time-series rows.
MQTT_BROKER = os.getenv("MQTT_BROKER", "ia-mqtt-broker")
# MediaMTX RTSP server that re-broadcasts the ffmpeg output stream.
MEDIAMTX_SERVER = os.getenv("MEDIAMTX_SERVER", "mediamtx")
MEDIAMTX_PORT = os.getenv("MEDIAMTX_PORT", "8554")
RTSP_STREAM_NAME = os.getenv("RTSP_STREAM_NAME", "live.stream")
# MQTT topic the per-frame CSV rows are published to.
TS_TOPIC = os.getenv("TS_TOPIC", "weld-data")
RTSP_URL = f"rtsp://{MEDIAMTX_SERVER}:{MEDIAMTX_PORT}/{RTSP_STREAM_NAME}"
# Process-wide handles, assigned in the __main__ block / stream_video_and_csv.
ffmpeg_proc = None
client = None
FRAME_RATE = 30 # Frames per second for video streaming
FRAME_WIDTH = 960
FRAME_HEIGHT = 600
published_data = []
# Configure logging: level comes from LOG_LEVEL env var, defaulting to INFO
# (invalid values also fall back to INFO via getattr's default).
log_level = os.getenv('LOG_LEVEL', 'INFO').upper()
logging_level = getattr(logging, log_level, logging.INFO)
logging.basicConfig(
level=logging_level,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
)
logger = logging.getLogger(__name__)
def read_simulation_files(base_filename: str, simulation_data_dir: str = "/simulation-data") -> Tuple[Optional[str], Optional[str]]:
    """Locate the paired simulation files (.avi video and .csv data) for a base name.

    Args:
        base_filename: File name without extension.
        simulation_data_dir: Directory searched for the pair.

    Returns:
        Tuple of (video_path, csv_path) when both files exist, otherwise (None, None).
    """
    video_path = os.path.join(simulation_data_dir, f"{base_filename}.avi")
    csv_path = os.path.join(simulation_data_dir, f"{base_filename}.csv")
    has_video = os.path.exists(video_path)
    has_csv = os.path.exists(csv_path)
    if has_video and has_csv:
        logger.debug(f"Found paired files:")
        logger.debug(f" Video: {video_path}")
        logger.debug(f" CSV: {csv_path}")
        return video_path, csv_path
    # At least one half of the pair is missing; report exactly which.
    logger.warning(f"Could not find both files for base name '{base_filename}'")
    if not has_video:
        logger.warning(f" Missing video file: {video_path}")
    if not has_csv:
        logger.warning(f" Missing CSV file: {csv_path}")
    return None, None
def get_available_simulation_files(simulation_data_dir: str = "/simulation-data") -> list:
    """Enumerate base names that have a complete simulation pair.

    A pair is an .avi video plus a .csv file sharing the same base name.

    Args:
        simulation_data_dir: Directory scanned for simulation data files.

    Returns:
        List of base filenames (without extension) having both files.
    """
    available_pairs = []
    for video_file in glob.glob(os.path.join(simulation_data_dir, "*.avi")):
        base_name = os.path.splitext(os.path.basename(video_file))[0]
        if os.path.exists(os.path.join(simulation_data_dir, f"{base_name}.csv")):
            available_pairs.append(base_name)
    return available_pairs
def load_simulation_data(base_filename: str, simulation_data_dir: str = "/simulation-data") -> Tuple[Optional[cv2.VideoCapture], Optional[pd.DataFrame]]:
    """Open the video capture and parse the CSV for one simulation pair.

    Args:
        base_filename: File name without extension.
        simulation_data_dir: Directory containing the simulation pair.

    Returns:
        Tuple of (video_capture, dataframe) on success, otherwise (None, None).
    """
    video_path, csv_path = read_simulation_files(base_filename, simulation_data_dir)
    if video_path is None or csv_path is None:
        return None, None
    try:
        capture = cv2.VideoCapture(video_path)
        if not capture.isOpened():
            logger.error(f"Error: Could not open video file {video_path}")
            return None, None
        frame_table = pd.read_csv(csv_path)
    except Exception as e:
        # Any OpenCV/pandas failure is reported but not propagated.
        logger.error(f"Error loading simulation data: {e}")
        return None, None
    logger.debug(f"Successfully loaded:")
    logger.debug(f" Video frames: {int(capture.get(cv2.CAP_PROP_FRAME_COUNT))} CSV rows: {len(frame_table)}")
    return capture, frame_table
def stream_video_and_csv(base_filename: str, simulation_data_dir: str = "/simulation-data", target_fps: float = None):
    """
    Stream video frames to the ffmpeg RTSP relay and matching CSV rows to MQTT.

    Each retained frame is written raw (BGR bytes) to the global
    ``ffmpeg_proc`` stdin, and the CSV row whose time window covers that
    frame is published as JSON to ``TS_TOPIC``. When the video is exhausted
    the capture is rewound and the function returns, letting the caller
    decide whether to replay.

    Args:
        base_filename: Base name of the files to stream (without extension).
            Required; the function logs an error and returns when falsy.
        simulation_data_dir: Directory containing simulation data files.
        target_fps: Target FPS for streaming. If None, uses original video FPS.
            If provided and lower than the source FPS, frames are skipped to
            approximate this rate.
    """
    if not base_filename:
        logger.error("No base filename provided, skipping streaming.")
        return
    cap, df = load_simulation_data(base_filename, simulation_data_dir)
    if cap is None or df is None:
        logger.error(f"Failed to load simulation data for '{base_filename}'")
        return
    num_rows = len(df)
    frame_id = 0
    if not cap.isOpened():
        logger.error("Error: Could not open video file")
        return
    fps = cap.get(cv2.CAP_PROP_FPS)
    original_fps = fps
    # Choose the effective output rate; never upsample past the source FPS.
    if target_fps is not None and target_fps > 0:
        if target_fps > fps:
            # BUG FIX: this is a warning condition, not fatal.
            logger.warning(f"Warning: Target FPS ({target_fps}) is higher than original FPS ({fps}). Using original FPS.")
            effective_fps = fps
        else:
            effective_fps = target_fps
            logger.info(f"Downframing from {fps:.2f} FPS to {effective_fps:.2f} FPS")
    else:
        effective_fps = fps
    # Keep only every Nth frame when downsampling.
    frame_skip_ratio = int(fps / effective_fps) if effective_fps < fps else 1
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    duration_sec = total_frames / fps if fps > 0 else 0
    logger.info(f"Video duration: {duration_sec:.2f} seconds, Original FPS: {original_fps:.2f}, Effective FPS: {effective_fps:.2f}, Total frames: {total_frames}")
    if frame_skip_ratio > 1:
        logger.info(f"Frame skip ratio: {frame_skip_ratio} (showing every {frame_skip_ratio} frames)")
    # Correlate each CSV row to an equal slice of the video timeline.
    row_time_window = duration_sec / num_rows if num_rows > 0 else 0
    logger.info(f"Row time window: {row_time_window:.2f} seconds")
    # Fresh MQTT connection per streamed file (module-level handle).
    global client
    client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION2)
    client.connect(MQTT_BROKER)
    global ffmpeg_proc
    frame_count = 0
    processed_frame_count = 0  # Count of frames actually processed/streamed
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            # End of video: rewind for a potential next run and reset state.
            cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
            frame_count = 0
            processed_frame_count = 0
            global published_data
            published_data = []
            return
        # Skip frames for downframing.
        if frame_count % frame_skip_ratio != 0:
            frame_count += 1
            continue
        # Map this frame (based on original timing) onto its CSV row.
        current_time = frame_count / original_fps if original_fps > 0 else 0
        row_idx = int(current_time / row_time_window) if row_time_window > 0 else 0
        logger.info(f"Frame {frame_count}, Time: {current_time:.2f}s, Row: {row_idx}, Processed: {processed_frame_count} for '{base_filename}'")
        if row_idx >= num_rows:
            row_idx = num_rows - 1
        csv_row = df.iloc[row_idx].to_dict()
        # Push raw frame bytes to the ffmpeg subprocess (RTSP relay).
        ffmpeg_proc.stdin.write(frame.tobytes())
        # Drop bookkeeping columns before publishing.
        # BUG FIX: the original checked for "Part No " (trailing space) but
        # deleted "Part No", raising KeyError when only the former existed;
        # pop() with a default is safe for every variant.
        for unwanted in ("Date", "Time", "Remarks ", "Part No ", "Part No"):
            csv_row.pop(unwanted, None)
        # RFC 3339 UTC timestamp with nanosecond precision.
        now_ns = time.time_ns()
        seconds = now_ns // 1_000_000_000
        nanoseconds = now_ns % 1_000_000_000
        csv_row["time"] = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime(seconds)) + f".{nanoseconds:09d}Z"
        csv_row = json.dumps(csv_row)
        client.publish(TS_TOPIC, str(csv_row))
        frame_id += 1
        frame_count += 1
        processed_frame_count += 1
        # Pace output to the effective FPS; guard against fps == 0 videos.
        if effective_fps > 0:
            time.sleep(1 / effective_fps)
    cap.release()
def check_and_load_simulation_files(target_fps):
    """
    Stream every available simulation file pair, optionally forever.

    Honors CONTINUOUS_SIMULATOR_INGESTION: when "true" (default) the whole
    set of files is replayed in an endless loop; otherwise the set is
    streamed once and the process then idles.

    Args:
        target_fps: FPS passed through to stream_video_and_csv().
    """
    available_files = get_available_simulation_files()
    if not available_files:
        logger.info("No simulation file pairs found!")
        return
    continuous_ingestion = os.getenv("CONTINUOUS_SIMULATOR_INGESTION", "true").lower() == "true"
    streamed_once = False
    while True:
        if not streamed_once:
            for i, filename in enumerate(available_files, 1):
                # BUG FIX: the log line previously never reported which file
                # was being streamed.
                logger.info(f" {i}. {filename}")
                stream_video_and_csv(filename, target_fps=target_fps)
            if not continuous_ingestion:
                streamed_once = True
        if not continuous_ingestion:
            logger.info("Streaming completed once as CONTINUOUS_SIMULATOR_INGESTION is set to false. Sleeping indefinitely.")
            time.sleep(60)  # idle; the loop re-sleeps every 60 seconds
def is_port_accessible(Host: str, Port: int):
    """
    Block until a TCP connection to Host:Port succeeds.

    Retries forever, probing roughly once per second with a 5-second
    connect timeout per attempt.

    :param Host: The hostname or IP address to check.
    :param Port: The port number to check.
    :return: True once the port accepts a connection.
    """
    logger.info("Waiting for %s accessible...", Host)
    while True:
        # One probe per iteration; the context manager closes the socket
        # whether the connect succeeds or times out.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as probe:
            probe.settimeout(5)
            try:
                probe.connect((Host, Port))
            except (socket.timeout, socket.error):
                pass
            else:
                logger.info("%s is accessible...", Host)
                return True
        time.sleep(1)
def start_dlsps_pipeline_server(Host: str, Port: int):
    """
    Start the DLStreamPipelineServer weld-defect pipeline via its REST API.

    Posts the pipeline definition (RTSP source, MQTT metadata + WebRTC frame
    destinations, classification model parameters) and returns the HTTP
    response on success, or None when the request fails.
    """
    logger.info("Starting DLStreamPipelineServer...")
    pipeline_request = {
        "source": {
            "uri": "rtsp://mediamtx:8554/live.stream",
            "type": "uri",
        },
        "destination": {
            "metadata": {
                "type": "mqtt",
                "topic": "vision_weld_defect_classification",
            },
            "frame": [
                {
                    "type": "webrtc",
                    "peer-id": "samplestream",
                }
            ],
        },
        "parameters": {
            "classification-properties": {
                "model": "/home/pipeline-server/resources/models/weld-defect-classification-f16-DeiT/deployment/Classification/model/model.xml",
                "device": "CPU",
            }
        },
    }
    try:
        response = requests.post(
            f"http://{Host}:{Port}/pipelines/user_defined_pipelines/weld_defect_classification",
            json=pipeline_request,
            timeout=10,
        )
        response.raise_for_status()
    except Exception as e:
        logger.error("Failed to start DLStreamPipelineServer: %s", e)
        return None
    logger.info("DLStreamPipelineServer started successfully.")
    return response
if __name__ == "__main__":
    # MQTT client for time-series publishing (stream_video_and_csv also
    # re-creates the module-level client for each streamed file).
    client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION2)
    client.connect(MQTT_BROKER)
    ts_host = os.getenv("TS_MS_SERVER", "ia-time-series-analytics-microservice")
    ts_port = int(os.getenv("TS_MS_PORT", "9092"))
    dlsps_host = os.getenv("DLSPS_MS_SERVER", "dlstreamer-pipeline-server")
    dlsps_port = int(os.getenv("DLSPS_MS_PORT", "8080"))
    # Wait until the time-series service is reachable before streaming.
    is_port_accessible(ts_host, ts_port)
    time.sleep(10)
    # is_port_accessible(dlsps_host, dlsps_port)
    target_fps = int(os.getenv("SIMULATION_TARGET_FPS", "10"))
    # ffmpeg reads raw BGR frames from stdin and republishes them as RTSP.
    ffmpeg_cmd = [
        "ffmpeg",
        "-re",
        "-f", "rawvideo",
        "-pix_fmt", "bgr24",
        "-s", f"{FRAME_WIDTH}x{FRAME_HEIGHT}",
        "-r", str(target_fps),
        "-i", "-",  # Read from stdin
        "-c:v", "libx264",
        "-preset", "ultrafast",
        "-f", "rtsp",
        "-rtsp_transport", "tcp",  # important, avoids UDP NAT timeouts
        RTSP_URL
    ]
    ffmpeg_proc = subprocess.Popen(ffmpeg_cmd, stdin=subprocess.PIPE)
    # dlsps_proc = start_dlsps_pipeline_server(dlsps_host, dlsps_port)
    # if dlsps_proc is None:
    #     logger.error("DLStreamPipelineServer failed to start. Exiting.")
    #     exit(1)
    try:
        check_and_load_simulation_files(target_fps)
    finally:
        # BUG FIX: cleanup previously ran only on a normal return (and the
        # "'ffmpeg_proc' in locals()" guard was always true). Tear down the
        # ffmpeg subprocess and MQTT session even if streaming raises.
        if ffmpeg_proc.stdin is not None:
            ffmpeg_proc.stdin.close()
        ffmpeg_proc.wait()
        client.disconnect()