@@ -321,6 +321,7 @@ struct aws_s3_file_io_options {
321321 * Skip buffering the part in memory before sending the request.
322322 * If set, set the `disk_throughput_gbps` to be reasonable align with the available disk throughput.
323323 * Otherwise, the transfer may fail with connection starvation.
324+ *
324325 * Default to false.
325326 **/
326327 bool should_stream ;
@@ -333,6 +334,8 @@ struct aws_s3_file_io_options {
333334 * Notes: There are possibilities that cannot reach the all available disk throughput:
334335 * 1. Disk is busy with other applications
335336 * 2. OS Cache may cap the throughput, use `direct_io` to get around this.
337+ *
338+ * Note: When `should_stream` is true, this defaults to 10 Gbps.
336339 **/
337340 double disk_throughput_gbps ;
338341
@@ -475,7 +478,7 @@ struct aws_s3_client_config {
475478 * Optional.
476479 * If set, this controls how the client interact with file I/O.
477480 * Read `aws_s3_file_io_options` for details.
478- * Notes: Only applies when AWS_S3_META_REQUEST_TYPE_PUT_OBJECT is set.
481+ * Notes: Only applies to meta requests with `send_filepath` set.
479482 * TODO: adapt it to `recv_filepath`.
480483 *
481484 * eg:
@@ -821,6 +824,9 @@ struct aws_s3_meta_request_options {
821824 * Notes: Only applies when `send_filepath` is set.
822825 * TODO: adapt it to `recv_filepath`.
823826 *
827+ * Note: if neither the client nor the meta request sets this, then for objects larger than 2TiB this will
828+ * default to options with `should_stream` set to true (all other fields keep their defaults) to avoid memory issues.
829+ *
824830 * eg:
825831 * - When the file is too large to fit in the buffer, set `should_stream` to avoid buffering the whole parts in
826832 * memory.
0 commit comments