Commit dda9b16

Author: Krish
Commit message: add new test for failure scenario
Parent: 941e55e

2 files changed (+135, -6 lines)


tests/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
@@ -334,6 +334,7 @@ add_net_test_case(test_s3_list_bucket_valid)
 if(ENABLE_MOCK_SERVER_TESTS)
     add_net_test_case(multipart_upload_mock_server)
     add_net_test_case(multipart_upload_with_n_retries_mock_server)
+    add_net_test_case(multipart_upload_failure_with_mock_server)
     add_net_test_case(multipart_upload_unsigned_with_trailer_checksum_mock_server)
     add_net_test_case(single_upload_unsigned_with_trailer_checksum_mock_server)
     add_net_test_case(multipart_upload_with_network_interface_names_mock_server)
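The name registered here must match a TEST_CASE entry in the C test file. As a rough sketch of how the two are tied together (assuming this suite's TEST_CASE macro wraps aws-c-common's AWS_TEST_CASE registration, which is how the aws-c-s3 tests are typically wired; the expansion below is not part of this commit):

/* Assumed shape of the test-suite helper macro (sketch only, not shown in this diff): */
#define TEST_CASE(NAME)                                                                                                \
    AWS_TEST_CASE(NAME, s_test_##NAME);                                                                               \
    static int s_test_##NAME(struct aws_allocator *allocator, void *ctx)

/* With that, the add_net_test_case(multipart_upload_failure_with_mock_server) entry above
 * ends up running the TEST_CASE(multipart_upload_failure_with_mock_server) body added below. */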

tests/s3_mock_server_tests.c

Lines changed: 134 additions & 6 deletions
@@ -119,7 +119,7 @@ static int s_validate_create_multipart_upload_metrics(struct aws_s3_request_metr
     struct aws_http_headers *response_headers = NULL;
     ASSERT_SUCCESS(aws_s3_request_metrics_get_response_headers(metrics, &response_headers));
     const struct aws_string *request_id = NULL;
-    ASSERT_SUCCESS(aws_s3_request_metrics_get_request_id(metrics, &request_id));
+    ASSERT_SUCCESS(aws_s3_request_metrics_get_request_attempt_id(metrics, &request_id));
     ASSERT_TRUE(aws_string_eq_c_str(request_id, "12345"));
     const struct aws_string *ip_address = NULL;
     ASSERT_SUCCESS(aws_s3_request_metrics_get_ip_address(metrics, &ip_address));
@@ -165,11 +165,14 @@ static int s_validate_upload_part_metrics(struct aws_s3_request_metrics *metrics
 
     AWS_ZERO_STRUCT(header_value);
     response_headers = NULL;
-    ASSERT_SUCCESS(aws_s3_request_metrics_get_response_headers(metrics, &response_headers));
-    ASSERT_SUCCESS(aws_http_headers_get(response_headers, aws_byte_cursor_from_c_str("ETag"), &header_value));
-    ASSERT_TRUE(aws_byte_cursor_eq_c_str(&header_value, "b54357faf0632cce46e942fa68356b38"));
-    ASSERT_SUCCESS(aws_http_headers_get(response_headers, aws_byte_cursor_from_c_str("Connection"), &header_value));
-    ASSERT_TRUE(aws_byte_cursor_eq_c_str(&header_value, "keep-alive"));
+    if (metrics->req_resp_info_metrics.response_status != -1) {
+        ASSERT_SUCCESS(aws_s3_request_metrics_get_response_headers(metrics, &response_headers));
+        ASSERT_SUCCESS(aws_http_headers_get(response_headers, aws_byte_cursor_from_c_str("ETag"), &header_value));
+        ASSERT_TRUE(aws_byte_cursor_eq_c_str(&header_value, "b54357faf0632cce46e942fa68356b38"));
+        ASSERT_SUCCESS(aws_http_headers_get(response_headers, aws_byte_cursor_from_c_str("Connection"), &header_value));
+        ASSERT_TRUE(aws_byte_cursor_eq_c_str(&header_value, "keep-alive"));
+    }
+
     request_type = 0;
     aws_s3_request_metrics_get_request_type(metrics, &request_type);
     ASSERT_UINT_EQUALS(AWS_S3_REQUEST_TYPE_UPLOAD_PART, request_type);
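The new guard skips the header assertions for attempts that never produced a response: a failed or cancelled attempt leaves response_status at the -1 sentinel, so there is no ETag or Connection header to check. If one preferred to keep the function flat, the same guard could be factored out roughly as below (a sketch that only reuses calls and fields already present in this hunk; the helper name is hypothetical):

/* Hypothetical helper, sketched from the hunk above: validate the response headers only
 * when the attempt actually received a response (response_status == -1 means it did not). */
static int s_validate_upload_part_headers_if_present(struct aws_s3_request_metrics *metrics) {
    if (metrics->req_resp_info_metrics.response_status == -1) {
        return AWS_OP_SUCCESS; /* failed or cancelled attempt: nothing to validate */
    }
    struct aws_http_headers *response_headers = NULL;
    struct aws_byte_cursor header_value;
    AWS_ZERO_STRUCT(header_value);
    ASSERT_SUCCESS(aws_s3_request_metrics_get_response_headers(metrics, &response_headers));
    ASSERT_SUCCESS(aws_http_headers_get(response_headers, aws_byte_cursor_from_c_str("ETag"), &header_value));
    ASSERT_TRUE(aws_byte_cursor_eq_c_str(&header_value, "b54357faf0632cce46e942fa68356b38"));
    return AWS_OP_SUCCESS;
}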
@@ -195,6 +198,20 @@ static int s_validate_complete_multipart_upload_metrics(struct aws_s3_request_me
     return AWS_OP_SUCCESS;
 }
 
+static int s_validate_abort_multipart_upload_metrics(struct aws_s3_request_metrics *metrics) {
+    enum aws_s3_request_type request_type = 0;
+    const struct aws_string *operation_name = NULL;
+
+    aws_s3_request_metrics_get_request_type(metrics, &request_type);
+    ASSERT_UINT_EQUALS(AWS_S3_REQUEST_TYPE_ABORT_MULTIPART_UPLOAD, request_type);
+    ASSERT_SUCCESS(aws_s3_request_metrics_get_operation_name(metrics, &operation_name));
+    ASSERT_STR_EQUALS("AbortMultipartUpload", aws_string_c_str(operation_name));
+
+    ASSERT_SUCCESS(s_validate_time_metrics(metrics, true));
+
+    return AWS_OP_SUCCESS;
+}
+
 static int s_validate_mpu_mock_server_metrics(struct aws_array_list *metrics_list, uint32_t expected_length) {
     /* Check the size of the metrics should be the same as the number of requests, which should be create MPU, two
      * upload parts and one complete MPU */
@@ -228,6 +245,9 @@ static int s_validate_retry_metrics(struct aws_array_list *metrics_list, uint32_
     ASSERT_SUCCESS(s_validate_create_multipart_upload_metrics(metrics));
 
     /* All of the middle should be Upload Parts*/
+    /* This check assumes each request fails 'expected_failures' times and then succeeds, so each part
+     * contributes expected_failures + 1 attempts. We make sure all of the attempts share the same
+     * first-attempt start timestamp and that the last attempt has an end timestamp. */
     size_t failed_count = 0;
     for (size_t i = 1; i < aws_array_list_length(metrics_list) - 1; i = i + expected_failures + 1) {
         aws_array_list_get_at(metrics_list, &metrics, i);
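To make the index arithmetic in that comment concrete (illustrative numbers only; the actual retry count is configured by the test, not by this hunk), with parts == 2 and expected_failures == 1 the metrics list the loop walks would look like:

/* Illustrative layout for parts == 2, expected_failures == 1 (not part of this diff):
 *   index 0 -> CreateMultipartUpload
 *   index 1 -> part 1, attempt 1 (failed)       <- the loop visits index 1 ...
 *   index 2 -> part 1, attempt 2 (succeeded)
 *   index 3 -> part 2, attempt 1 (failed)       <- ... then steps by expected_failures + 1 to index 3
 *   index 4 -> part 2, attempt 2 (succeeded)
 *   index 5 -> CompleteMultipartUpload          <- last entry, excluded by the loop bound
 */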
@@ -256,6 +276,47 @@ static int s_validate_retry_metrics(struct aws_array_list *metrics_list, uint32_
     return AWS_OP_SUCCESS;
 }
 
+static int s_validate_fail_metrics(struct aws_array_list *metrics_list, uint32_t parts) {
+    struct aws_s3_request_metrics *metrics = NULL, *metrics2 = NULL;
+
+    /* First metrics should be the CreateMPU */
+    aws_array_list_get_at(metrics_list, (void **)&metrics, 0);
+    ASSERT_SUCCESS(s_validate_create_multipart_upload_metrics(metrics));
+
+    /* It is difficult to simulate a forced failure and be precise about which request fails when there
+     * are multiple connections. Assuming the first request itself fails, all of the other parts are force-cancelled.
+     * If there are n parts, we would record n + 5 upload-part metrics: 5 extra for the retries of the first part alone. */
+
+    /* First part fails 5 times */
+    aws_array_list_get_at(metrics_list, &metrics, 1);
+    ASSERT_TRUE(metrics->crt_info_metrics.error_code != AWS_ERROR_SUCCESS);
+    for (size_t i = 1; i < 6; i++) {
+        aws_array_list_get_at(metrics_list, &metrics2, i + 1);
+        ASSERT_INT_EQUALS(
+            metrics->time_metrics.s3_request_first_attempt_start_timestamp_ns,
+            metrics2->time_metrics.s3_request_first_attempt_start_timestamp_ns);
+        ASSERT_INT_EQUALS(metrics->crt_info_metrics.retry_attempt + 1, metrics2->crt_info_metrics.retry_attempt);
+        ASSERT_TRUE(metrics2->crt_info_metrics.error_code != AWS_ERROR_SUCCESS);
+        ASSERT_SUCCESS(s_validate_upload_part_metrics(metrics, false));
+        metrics = metrics2;
+    }
+    ASSERT_SUCCESS(s_validate_upload_part_metrics(metrics, true));
+
+    /* Rest of the requests should have been cancelled */
+    for (size_t i = 7; i < parts + 6; i++) {
+        aws_array_list_get_at(metrics_list, &metrics, i);
+        ASSERT_TRUE(metrics->crt_info_metrics.error_code == AWS_ERROR_S3_CANCELED);
+        ASSERT_SUCCESS(s_validate_upload_part_metrics(metrics, true));
+    }
+
+    /* Last metrics should be the AbortMPU */
+    metrics = NULL;
+    aws_array_list_get_at(metrics_list, (void **)&metrics, aws_array_list_length(metrics_list) - 1);
+    ASSERT_SUCCESS(s_validate_abort_multipart_upload_metrics(metrics));
+
+    return AWS_OP_SUCCESS;
+}
+
 TEST_CASE(multipart_upload_mock_server) {
     (void)ctx;
 
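Plugging in the numbers the new test below uses (a 10 MB object with a 5 MB part size, so parts == 2) and taking the 5 retries for the failing part from the comment above, the layout s_validate_fail_metrics walks adds up as follows. The helper is purely illustrative and not part of this commit:

/* Illustrative only: how many metrics entries s_validate_fail_metrics expects,
 * assuming the failing first part is attempted 6 times (first attempt + 5 retries). */
static size_t s_expected_fail_metrics_count(size_t parts) {
    size_t create_mpu = 1;              /* index 0 */
    size_t first_part_attempts = 6;     /* indices 1..6, all failed */
    size_t cancelled_parts = parts - 1; /* indices 7..parts + 5, cancelled */
    size_t abort_mpu = 1;               /* last index */
    return create_mpu + first_part_attempts + cancelled_parts + abort_mpu; /* parts + 7 */
}
/* For parts == 2 that is 9 entries: parts + 5 == 7 upload-part attempts plus CreateMPU and AbortMPU. */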
@@ -371,6 +432,73 @@ TEST_CASE(multipart_upload_with_n_retries_mock_server) {
     return AWS_OP_SUCCESS;
 }
 
+static void s_upload_part_force_fail(struct aws_s3_request *request, struct aws_http_message *message) {
+    if (message == NULL) {
+        return;
+    }
+
+    struct aws_http_header throttle_header = {
+        .name = aws_byte_cursor_from_c_str("force_throttle"),
+        .value = aws_byte_cursor_from_c_str("true"),
+    };
+    aws_http_message_add_header(message, throttle_header);
+}
+
+TEST_CASE(multipart_upload_failure_with_mock_server) {
+    (void)ctx;
+
+    struct aws_s3_tester tester;
+    ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester));
+    int part_size = 5;
+
+    struct aws_s3_tester_client_options client_options = {
+        .part_size = MB_TO_BYTES(part_size),
+        .tls_usage = AWS_S3_TLS_DISABLED,
+        .max_active_connections_override = 1,
+    };
+
+    struct aws_s3_client *client = NULL;
+    ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client));
+    struct aws_s3_client_vtable *patched_client_vtable = aws_s3_tester_patch_client_vtable(&tester, client, NULL);
+    patched_client_vtable->after_prepare_upload_part_finish = s_upload_part_force_fail;
+
+    int object_size = 10;
+    int parts = object_size / part_size;
+
+    struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/default");
+    {
+        /* 1. Trailer checksum */
+        struct aws_s3_tester_meta_request_options put_options = {
+            .allocator = allocator,
+            .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT,
+            .client = client,
+            .checksum_algorithm = AWS_SCA_CRC32,
+            .validate_get_response_checksum = false,
+            .put_options =
+                {
+                    .object_size_mb = object_size,
+                    .object_path_override = object_path,
+                },
+            .mock_server = true,
+            .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_NO_VALIDATE,
+        };
+
+        struct aws_s3_meta_request_test_results meta_request_test_results;
+
+        /* Check that the metrics recorded for the failed upload match the expected layout */
+        aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator);
+        ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &meta_request_test_results));
+        ASSERT_SUCCESS(
+            s_validate_fail_metrics(&meta_request_test_results.synced_data.metrics, parts));
+
+        aws_s3_meta_request_test_results_clean_up(&meta_request_test_results);
+    }
+    aws_s3_client_release(client);
+    aws_s3_tester_clean_up(&tester);
+
+    return AWS_OP_SUCCESS;
+}
+
 /* Singleton used by tests in this file */
 static struct get_requests_header_tester {
     struct aws_allocator *alloc;
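The patched after_prepare_upload_part_finish hook above tags every outgoing UploadPart request with a force_throttle header, which the mock server is presumably set up to answer with a retryable error (the server-side handling is not part of this diff). As a sketch of how the hook could be targeted more narrowly, the variation below would fail only one part; it assumes struct aws_s3_request exposes a part_number field, which this diff does not show:

/* Sketch only, not part of this commit: throttle just part 2 rather than every part. */
static void s_upload_part_force_fail_single_part(struct aws_s3_request *request, struct aws_http_message *message) {
    if (message == NULL || request->part_number != 2) {
        return; /* leave all other parts untouched */
    }

    struct aws_http_header throttle_header = {
        .name = aws_byte_cursor_from_c_str("force_throttle"),
        .value = aws_byte_cursor_from_c_str("true"),
    };
    aws_http_message_add_header(message, throttle_header);
}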
