Skip to content

Commit 29ceb35

Browse files
Fix issue with error response parsing potentially overwriting upload buffer (#528)
1 parent e777d53 commit 29ceb35

File tree

3 files changed

+34
-31
lines changed

3 files changed

+34
-31
lines changed

include/aws/s3/private/s3_meta_request_impl.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -242,7 +242,6 @@ struct aws_s3_meta_request {
242242
* The length will always be less than part-size */
243243
struct aws_byte_buf buffered_data;
244244
struct aws_s3_buffer_ticket *buffered_data_ticket;
245-
struct aws_future_s3_buffer_ticket *buffered_ticket_future;
246245

247246
/* Waker callback.
248247
* Stored if a poll_write() call returns result.is_pending

source/s3_meta_request.c

Lines changed: 23 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -1428,14 +1428,18 @@ static int s_s3_meta_request_incoming_body(
14281428
}
14291429

14301430
if (request->send_data.response_body.capacity == 0) {
1431-
if (request->ticket != NULL) {
1431+
/* Make sure the request is a GET, since PUTs can also have a ticket allocated, which is used for the request body. */
1432+
if (request->request_type == AWS_S3_REQUEST_TYPE_GET_OBJECT && request->ticket != NULL) {
14321433
request->send_data.response_body = aws_s3_buffer_ticket_claim(request->ticket);
14331434
} else {
14341435
size_t buffer_size = s_dynamic_body_initial_buf_size;
14351436
aws_byte_buf_init(&request->send_data.response_body, meta_request->allocator, buffer_size);
14361437
}
14371438
}
14381439

1440+
/* Sanity check that we haven't somehow reused the same buffer for both upload and download. */
1441+
AWS_FATAL_ASSERT(request->send_data.response_body.buffer != request->request_body.buffer);
1442+
14391443
/* Note: not having part sized response body means the buffer is dynamic and
14401444
* can grow. */
14411445
if (s_response_body_append(&request->send_data.response_body, data)) {
@@ -2366,54 +2370,44 @@ struct aws_s3_meta_request_poll_write_result aws_s3_meta_request_poll_write(
23662370
/* If we don't already have a buffer, grab one from the pool. */
23672371
if (meta_request->synced_data.async_write.buffered_data_ticket == NULL) {
23682372

2369-
if (meta_request->synced_data.async_write.buffered_data_ticket == NULL &&
2370-
meta_request->synced_data.async_write.buffered_ticket_future == NULL) {
2371-
/* NOTE: we acquire a forced-buffer because there's a risk of deadlock if we
2372-
* waited for a normal ticket reservation, respecting the pool's memory limit.
2373-
* (See "test_s3_many_async_uploads_without_data" for description of deadlock scenario) */
2374-
2375-
struct aws_s3_buffer_pool_reserve_meta meta = {
2376-
.size = meta_request->part_size,
2377-
.can_block = true,
2378-
.meta_request = meta_request,
2379-
.client = meta_request->client};
2373+
struct aws_future_s3_buffer_ticket *buffered_ticket_future;
2374+
/* NOTE: we acquire a forced-buffer because there's a risk of deadlock if we
2375+
* waited for a normal ticket reservation, respecting the pool's memory limit.
2376+
* (See "test_s3_many_async_uploads_without_data" for description of deadlock scenario) */
23802377

2381-
meta_request->synced_data.async_write.buffered_ticket_future =
2382-
aws_s3_buffer_pool_reserve(meta_request->client->buffer_pool, meta);
2378+
struct aws_s3_buffer_pool_reserve_meta meta = {
2379+
.size = meta_request->part_size,
2380+
.can_block = true,
2381+
.meta_request = meta_request,
2382+
.client = meta_request->client};
23832383

2384-
AWS_FATAL_ASSERT(meta_request->synced_data.async_write.buffered_ticket_future);
2385-
}
2384+
buffered_ticket_future = aws_s3_buffer_pool_reserve(meta_request->client->buffer_pool, meta);
2385+
AWS_FATAL_ASSERT(buffered_ticket_future);
23862386

2387-
if (aws_future_s3_buffer_ticket_is_done(meta_request->synced_data.async_write.buffered_ticket_future)) {
2388-
if (aws_future_s3_buffer_ticket_get_error(
2389-
meta_request->synced_data.async_write.buffered_ticket_future) != AWS_OP_SUCCESS) {
2387+
if (aws_future_s3_buffer_ticket_is_done(buffered_ticket_future)) {
2388+
if (aws_future_s3_buffer_ticket_get_error(buffered_ticket_future) != AWS_OP_SUCCESS) {
23902389
AWS_LOGF_ERROR(AWS_LS_S3_META_REQUEST, "id=%p: Failed to acquire buffer.", (void *)meta_request);
23912390
illegal_usage_terminate_meta_request = true;
23922391
} else {
23932392
meta_request->synced_data.async_write.buffered_data_ticket =
2394-
aws_future_s3_buffer_ticket_get_result_by_move(
2395-
meta_request->synced_data.async_write.buffered_ticket_future);
2393+
aws_future_s3_buffer_ticket_get_result_by_move(buffered_ticket_future);
23962394

2397-
meta_request->synced_data.async_write.buffered_ticket_future = aws_future_s3_buffer_ticket_release(
2398-
meta_request->synced_data.async_write.buffered_ticket_future);
2395+
aws_future_s3_buffer_ticket_release(buffered_ticket_future);
23992396

24002397
meta_request->synced_data.async_write.buffered_data =
24012398
aws_s3_buffer_ticket_claim(meta_request->synced_data.async_write.buffered_data_ticket);
24022399
}
24032400
} else {
2404-
/* Failing to acquire memory synchronously is a hard error for now. Consider relaxing this in future. */
2405-
meta_request->synced_data.async_write.buffered_ticket_future =
2406-
aws_future_s3_buffer_ticket_release(meta_request->synced_data.async_write.buffered_ticket_future);
2407-
2408-
AWS_LOGF_TRACE(
2401+
AWS_LOGF_ERROR(
24092402
AWS_LS_S3_META_REQUEST,
24102403
"id=%p: Illegal call to write(). Failed to acquire buffer memory.",
24112404
(void *)meta_request);
24122405
illegal_usage_terminate_meta_request = true;
2406+
aws_future_s3_buffer_ticket_release(buffered_ticket_future);
24132407
}
24142408
}
24152409

2416-
if (!illegal_usage_terminate_meta_request && !result.is_pending) {
2410+
if (!illegal_usage_terminate_meta_request) {
24172411
/* Copy as much data as we can into the buffer */
24182412
struct aws_byte_cursor processed_data =
24192413
aws_byte_buf_write_to_capacity(&meta_request->synced_data.async_write.buffered_data, &data);

tests/s3_mock_server_tests.c

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -466,10 +466,20 @@ TEST_CASE(multipart_upload_checksum_with_retry_mock_server) {
466466
.mock_server = true,
467467
};
468468

469-
ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL));
469+
struct aws_s3_meta_request_test_results meta_request_test_results;
470+
aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator);
471+
472+
ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &meta_request_test_results));
473+
474+
ASSERT_INT_EQUALS(meta_request_test_results.upload_review.part_count, 2);
475+
/* Note: the data we currently generate is always the same,
476+
* so make sure that retry does not mangle the data by checking the checksum value */
477+
ASSERT_STR_EQUALS("7/xUXw==", aws_string_c_str(meta_request_test_results.upload_review.part_checksums_array[0]));
478+
ASSERT_STR_EQUALS("PCOjcw==", aws_string_c_str(meta_request_test_results.upload_review.part_checksums_array[1]));
470479

471480
aws_s3_client_release(client);
472481
aws_s3_tester_clean_up(&tester);
482+
aws_s3_meta_request_test_results_clean_up(&meta_request_test_results);
473483

474484
return AWS_OP_SUCCESS;
475485
}

0 commit comments

Comments
 (0)