Commit 4a072cc

Why do we try to finalize the checksum on error?
1 parent c27e829 commit 4a072cc

7 files changed: +120, -7 lines changed

source/s3_checksum_stream.c

Lines changed: 1 addition & 1 deletion
@@ -113,7 +113,7 @@ static void s_aws_input_checksum_stream_destroy(struct aws_checksum_stream *impl
     }
 
     /* Compute the checksum of whatever was read, if we didn't reach the end of the underlying stream. */
-    s_finalize_checksum(impl);
+    // s_finalize_checksum(impl);
 
     aws_checksum_destroy(impl->checksum);
     aws_input_stream_release(impl->old_stream);
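
For context on the commit message: if the meta request fails or is canceled partway through, the wrapped stream never reaches EOF, yet this destroy path still folded whatever had been read into a finalized digest. The hunk above answers the question by commenting the call out. A guarded alternative is sketched below for illustration only; it is not part of this commit, and the reached_eof flag is a hypothetical field that the stream's read path would set at end-of-stream.

/* Illustrative sketch, not the change in this commit. "reached_eof" is a hypothetical
 * flag; the rest of the function mirrors the original destroy path. */
static void s_aws_input_checksum_stream_destroy(struct aws_checksum_stream *impl) {
    if (!impl) {
        return;
    }

    if (impl->reached_eof) {
        /* Only finalize when the whole underlying stream was actually read. */
        s_finalize_checksum(impl);
    }

    aws_checksum_destroy(impl->checksum);
    aws_input_stream_release(impl->old_stream);
    /* ... release impl as in the original ... */
}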

tests/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
@@ -331,6 +331,7 @@ if(ENABLE_MOCK_SERVER_TESTS)
     add_net_test_case(multipart_upload_unsigned_with_trailer_checksum_mock_server)
     add_net_test_case(single_upload_unsigned_with_trailer_checksum_mock_server)
     add_net_test_case(multipart_upload_with_network_interface_names_mock_server)
+    add_net_test_case(multipart_upload_checksum_with_retry_before_finish_mock_server)
     add_net_test_case(multipart_upload_checksum_with_retry_mock_server)
     add_net_test_case(multipart_download_checksum_with_retry_mock_server)
     add_net_test_case(async_internal_error_from_complete_multipart_mock_server)
Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
+{
+    "status": 503,
+    "headers": {"ETag": "b54357faf0632cce46e942fa68356b38", "Connection": "keep-alive"},
+    "body": [
+        "<?xml version=\"1.0\" encoding=\"UTF-8\"?>",
+        "",
+        "<Error>",
+        "<Code>SlowDown</Code>",
+        "<RequestId>656c76696e6727732072657175657374</RequestId>",
+        "<HostId>Uuag1LuByRx9e6j5Onimru9pO4ZVKnJ2Qz7/C1NPcfTWAtRPfTaOFg==</HostId>",
+        "</Error>"
+    ]
+}

tests/mock_s3_server/mock_s3_server.py

Lines changed: 16 additions & 6 deletions
@@ -22,6 +22,7 @@
 
 # Flags to keep between requests
 SHOULD_THROTTLE = True
+SHOULD_SKIP_WAIT = True
 RETRY_REQUEST_COUNT = 0
 
 
@@ -63,6 +64,7 @@ def _resolve_file_path(self, wrapper, request_type):
         if self.json_path is None:
             response_file = os.path.join(
                 base_dir, request_type.name, f"{self.path[1:]}.json")
+            print(response_file)
             if os.path.exists(response_file) == False:
                 wrapper.info(
                     response_file, "not exist, using the default response")
@@ -75,6 +77,7 @@ def _resolve_file_path(self, wrapper, request_type):
             response_file = os.path.join(
                 base_dir, request_type.name, f"default.json")
         else:
+            print("throttle")
             wrapper.info("Throttling")
             # Flip the flag
             SHOULD_THROTTLE = not SHOULD_THROTTLE
@@ -480,12 +483,19 @@ async def handle_mock_s3_request(wrapper, request):
         # TODO: support more type.
         wrapper.info("unsupported request:", request)
         request_type = S3Opts.CreateMultipartUpload
-
-    while True:
-        event = await wrapper.next_event()
-        if type(event) is h11.EndOfMessage:
-            break
-        assert type(event) is h11.Data
+    global SHOULD_SKIP_WAIT
+    if "before_finish" not in parsed_path.path or request_type is not S3Opts.UploadPart:
+        # restore the state
+        SHOULD_SKIP_WAIT = True
+    if "before_finish" in parsed_path.path and SHOULD_SKIP_WAIT and request_type is S3Opts.UploadPart:
+        print("not wait for the request to complete")
+        SHOULD_SKIP_WAIT = False
+    else:
+        while True:
+            event = await wrapper.next_event()
+            if type(event) is h11.EndOfMessage:
+                break
+            assert type(event) is h11.Data
 
     if response_config is None:
         response_config = ResponseConfig(parsed_path.path)

tests/s3_mock_server_tests.c

Lines changed: 87 additions & 0 deletions
@@ -442,6 +442,93 @@ static void s_after_prepare_upload_part_finish(struct aws_s3_request *request) {
     }
 }
 
+/**
+ * This test is built for
+ * 1. The retry happens before the upload has finished.
+ */
+TEST_CASE(multipart_upload_checksum_with_retry_before_finish_mock_server) {
+    (void)ctx;
+    struct aws_s3_tester tester;
+    ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester));
+    struct aws_s3_tester_client_options client_options = {
+        .part_size = MB_TO_BYTES(5),
+        .tls_usage = AWS_S3_TLS_DISABLED,
+    };
+
+    struct aws_s3_client *client = NULL;
+    ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client));
+    struct aws_s3_client_vtable *patched_client_vtable = aws_s3_tester_patch_client_vtable(&tester, client, NULL);
+    patched_client_vtable->after_prepare_upload_part_finish = s_after_prepare_upload_part_finish;
+
+    struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/throttle_before_finish");
+    {
+        /* 1. Trailer checksum */
+        struct aws_s3_tester_meta_request_options put_options = {
+            .allocator = allocator,
+            .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT,
+            .client = client,
+            .checksum_algorithm = AWS_SCA_CRC32,
+            .validate_get_response_checksum = false,
+            .put_options =
+                {
+                    .object_size_mb = 10,
+                    .object_path_override = object_path,
+                    .sleep_before_read_secs = 1,
+                },
+            .mock_server = true,
+        };
+
+        struct aws_s3_meta_request_test_results meta_request_test_results;
+        aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator);
+
+        ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &meta_request_test_results));
+
+        ASSERT_INT_EQUALS(meta_request_test_results.upload_review.part_count, 2);
+        /* Note: the data we currently generate is always the same,
+         * so make sure that retry does not mangle the data by checking the checksum value */
+        ASSERT_STR_EQUALS(
+            "7/xUXw==", aws_string_c_str(meta_request_test_results.upload_review.part_checksums_array[0]));
+        ASSERT_STR_EQUALS(
+            "PCOjcw==", aws_string_c_str(meta_request_test_results.upload_review.part_checksums_array[1]));
+        aws_s3_meta_request_test_results_clean_up(&meta_request_test_results);
+    }
+    {
+        /* 2. header checksum */
+        struct aws_s3_tester_meta_request_options put_options = {
+            .allocator = allocator,
+            .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT,
+            .client = client,
+            .checksum_algorithm = AWS_SCA_CRC32,
+            .checksum_via_header = true,
+            .validate_get_response_checksum = false,
+            .put_options =
+                {
+                    .object_size_mb = 10,
+                    .object_path_override = object_path,
+                },
+            .mock_server = true,
+        };
+
+        struct aws_s3_meta_request_test_results meta_request_test_results;
+        aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator);
+
+        ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &meta_request_test_results));
+
+        ASSERT_INT_EQUALS(meta_request_test_results.upload_review.part_count, 2);
+        /* Note: the data we currently generate is always the same,
+         * so make sure that retry does not mangle the data by checking the checksum value */
+        ASSERT_STR_EQUALS(
+            "7/xUXw==", aws_string_c_str(meta_request_test_results.upload_review.part_checksums_array[0]));
+        ASSERT_STR_EQUALS(
+            "PCOjcw==", aws_string_c_str(meta_request_test_results.upload_review.part_checksums_array[1]));
+        aws_s3_meta_request_test_results_clean_up(&meta_request_test_results);
+    }
+    aws_s3_client_release(client);
+    aws_s3_tester_clean_up(&tester);
+
+    return AWS_OP_SUCCESS;
+}
+
 /**
  * This test is built for
  * 1. We had a memory leak when the retry was triggered and the checksum was calculated.

tests/s3_tester.c

Lines changed: 1 addition & 0 deletions
@@ -1681,6 +1681,7 @@ int aws_s3_tester_send_meta_request_with_options(
                 .autogen_length = upload_size_bytes,
                 .eof_requires_extra_read = options->put_options.eof_requires_extra_read,
                 .max_bytes_per_read = options->put_options.max_bytes_per_read,
+                .sleep_before_read_secs = options->put_options.sleep_before_read_secs,
             },
         };
         if (options->put_options.invalid_input_stream) {

tests/s3_tester.h

Lines changed: 1 addition & 0 deletions
@@ -229,6 +229,7 @@ struct aws_s3_tester_meta_request_options {
        struct aws_byte_cursor content_encoding;
        enum aws_s3_tester_full_object_checksum full_object_checksum;
        struct aws_byte_cursor if_none_match_header;
+       size_t sleep_before_read_secs;
    } put_options;
 
    enum aws_s3_tester_sse_type sse_type;
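
The sleep_before_read_secs option declared here (and wired through s3_tester.c above) is what the new test sets to 1: the intent, as the test and mock-server changes suggest, is to delay the part-body read so the mock server's immediate 503 SlowDown lands while the upload part is still unfinished, which is the retry-before-finish window this commit probes. A rough sketch of that kind of delayed read follows; the helper name and shape are assumptions for illustration, not the tester's actual implementation.

#include <aws/common/clock.h>
#include <aws/common/thread.h>
#include <aws/io/stream.h>

/* Hypothetical helper (not the tester's code): read from a wrapped stream after an
 * optional delay, roughly what a sleep-before-read option can do to keep the part
 * body in flight while the server's early 503 response comes back. */
static int s_read_after_delay(struct aws_input_stream *stream, struct aws_byte_buf *dest, uint64_t delay_secs) {
    if (delay_secs > 0) {
        aws_thread_current_sleep(aws_timestamp_convert(delay_secs, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL));
    }
    return aws_input_stream_read(stream, dest);
}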
