@@ -168,7 +168,6 @@ static void s_validate_meta_request_checksum_on_finish(
168168 * The distribution of connections uses something called a weight, a ratio of part_number / part_size.
169169 * we can use the same weight while providing the connections to provide more connections to the same sized object if
170170 * the number of parts is higher.
171- * As part of experiments, we have identified that
172171 */
173172uint32_t s_calculate_meta_request_connections (struct aws_s3_client * client , struct aws_s3_meta_request * meta_request ) {
174173 AWS_PRECONDITION (client );
@@ -178,11 +177,14 @@ uint32_t s_calculate_meta_request_connections(struct aws_s3_client *client, stru
178177 double throughput_per_connection =
179178 meta_request -> is_express ? g_s3express_throughput_per_connection_gbps : g_s3_throughput_per_connection_gbps ;
180179
181- double achieved_weight = (3840 * 106.6 ) / (30 * 1024 * 1024 * 1024 * 200 );
180+    /* Assuming 8MB part size provides the ideal throughput we expect after amortization, we find a ratio with the
181+     * current part size to find what the scaled throughput per connection would be. Logically, smaller for smaller
182+     * part sizes, larger for larger part sizes. */
183+ double scaling_factor = (meta_request -> part_size > 0 ? ((MB_TO_BYTES (8 ) * 1.0 ) / meta_request -> part_size ) : 1 );
184+
182185 /* Calculate connections needed: target_throughput / throughput_per_connection */
183- double ideal_connections =
184- (client -> throughput_target_gbps * meta_request -> weight ) / (achieved_weight * throughput_per_connection );
185- uint32_t required_connections = (uint32_t )ceil (ideal_connections );
186+ double ideal_connections = client -> throughput_target_gbps / throughput_per_connection ;
187+    uint32_t required_connections = (uint32_t )ceil (ideal_connections * scaling_factor );
186188
187189 /* Clamp to reasonable range */
188190 if (required_connections < g_min_num_connections ) {
0 commit comments