@@ -982,7 +982,7 @@ struct aws_future_http_message *s_s3_prepare_upload_part(struct aws_s3_request *
         uint64_t offset = 0;
         size_t request_body_size = s_compute_request_body_size(meta_request, request->part_number, &offset);
         request->request_stream = aws_input_stream_new_from_parallel_stream(
-            allocator, meta_request->request_body_parallel_stream, offset, request_body_size);
+            allocator, meta_request->request_body_parallel_stream, meta_request, offset, request_body_size);
         request->content_length = request_body_size;
         struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl;

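The new `meta_request` argument presumably gives the wrapper stream a way to record per-read timing into `meta_request->read_metrics_list`, which the retry path in the next hunk dumps to CSV. A minimal sketch of what that bookkeeping could look like; the `struct s3_data_read_metrics` fields are inferred from the CSV writer below, and `s_record_read_metric` is a hypothetical helper, not code from this PR:

```c
/* Hypothetical sketch (not this PR's code): a metrics entry with the fields
 * the CSV dump below consumes, plus a helper the wrapper stream could call
 * once per completed read. */
struct s3_data_read_metrics {
    uint64_t offset;          /* byte offset of the read within the upload body */
    uint64_t size;            /* bytes read */
    uint64_t start_timestamp; /* nanoseconds, e.g. from aws_high_res_clock_get_ticks() */
    uint64_t end_timestamp;   /* nanoseconds */
};

static void s_record_read_metric( /* hypothetical helper */
    struct aws_s3_meta_request *meta_request,
    uint64_t offset,
    uint64_t size,
    uint64_t start_ns,
    uint64_t end_ns) {

    struct s3_data_read_metrics m = {
        .offset = offset,
        .size = size,
        .start_timestamp = start_ns,
        .end_timestamp = end_ns,
    };
    /* read_metrics_list is shared with the dump on the retry path, so guard it
     * with the same synced-data lock used there. */
    aws_s3_meta_request_lock_synced_data(meta_request);
    aws_array_list_push_back(&meta_request->read_metrics_list, &m);
    aws_s3_meta_request_unlock_synced_data(meta_request);
}
```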
@@ -1020,14 +1020,71 @@ struct aws_future_http_message *s_s3_prepare_upload_part(struct aws_s3_request *
             s_s3_prepare_upload_part_finish(part_prep, AWS_ERROR_SUCCESS);
         } else {
             printf("PARALLEL retry 8MB read\n");
+            /* Create a unique filename from the meta_request pointer and a timestamp (needs <time.h>). */
+            char filename[256];
+            struct timespec ts;
+            clock_gettime(CLOCK_REALTIME, &ts);
+            snprintf(
+                filename,
+                sizeof(filename),
+                "/tmp/s3_read_metrics_%p_%ld_%ld.csv",
+                (void *)meta_request,
+                (long)ts.tv_sec,
+                ts.tv_nsec);
+
+            FILE *metrics_file = fopen(filename, "w");
+            /* BEGIN CRITICAL SECTION */
+            aws_s3_meta_request_lock_synced_data(meta_request);
+            /* Write every recorded read metric to the file. */
+            size_t metric_length = aws_array_list_length(&meta_request->read_metrics_list);
+            if (metrics_file) {
+                /* Write CSV header. */
+                fprintf(metrics_file, "index,offset,size,start_timestamp,end_timestamp,duration_ns,throughput_mbps\n");
+                /* Write all metrics. */
+                for (size_t j = 0; j < metric_length; j++) {
+                    struct s3_data_read_metrics m;
+                    aws_array_list_get_at(&meta_request->read_metrics_list, &m, j);
+
+                    uint64_t duration = m.end_timestamp - m.start_timestamp;
+                    /* bits per microsecond == megabits per second */
+                    double throughput_mbps =
+                        duration > 0 ? (double)(m.size * 8) / ((double)duration / 1000.0) : 0.0;
+
+                    fprintf(
+                        metrics_file,
+                        "%zu,%llu,%llu,%llu,%llu,%llu,%.2f\n",
+                        j,
+                        (unsigned long long)m.offset,
+                        (unsigned long long)m.size,
+                        (unsigned long long)m.start_timestamp,
+                        (unsigned long long)m.end_timestamp,
+                        (unsigned long long)duration,
+                        throughput_mbps);
+                }
+                aws_array_list_clean_up(&meta_request->read_metrics_list);
+                fclose(metrics_file);
+
+                AWS_LOGF_INFO(
+                    AWS_LS_S3_META_REQUEST,
+                    "id=%p Wrote %zu read metrics to %s",
+                    (void *)meta_request,
+                    metric_length,
+                    filename);
+            } else {
+                AWS_LOGF_ERROR(
+                    AWS_LS_S3_META_REQUEST, "id=%p Failed to open metrics file %s", (void *)meta_request, filename);
+            }
+            aws_s3_meta_request_unlock_synced_data(meta_request);
+            /* END CRITICAL SECTION */
+
             /* Not the first time preparing request (e.g. retry).
              * We can skip over the async steps that read the body stream */
             /* Seek back to beginning of the stream. */
             aws_input_stream_release(request->request_stream);
             uint64_t offset = 0;
             size_t request_body_size = s_compute_request_body_size(meta_request, request->part_number, &offset);
             request->request_stream = aws_input_stream_new_from_parallel_stream(
-                allocator, meta_request->request_body_parallel_stream, offset, request_body_size);
+                allocator, meta_request->request_body_parallel_stream, meta_request, offset, request_body_size);
             s_s3_prepare_upload_part_finish(part_prep, AWS_ERROR_SUCCESS);
         }
     } else if (request->num_times_prepared == 0) {
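Two notes on the dump above. On units: with `duration` in nanoseconds, `(m.size * 8) / (duration / 1000.0)` is bits per microsecond, which already equals megabits per second, so no further scaling is needed. On lifetime: `aws_array_list_clean_up()` zeroes the list, so it needs a matching one-time init wherever the meta request is constructed, and a re-init there if metrics should keep accumulating across further retries. A sketch of that setup, assuming it lives in the meta request constructor (the field name comes from this PR; the call itself is standard aws-c-common):

```c
/* Hypothetical sketch (not shown in this diff): one-time init of the metrics
 * list consumed above. The initial capacity of 128 entries is arbitrary; the
 * list grows dynamically as the wrapper stream pushes entries. */
aws_array_list_init_dynamic(
    &meta_request->read_metrics_list,
    allocator,
    128 /* initial capacity */,
    sizeof(struct s3_data_read_metrics));
```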