Commit f6aaedd

Add support for s3 canned ACLs for uploaded objects
1 parent 73ebe5a

7 files changed, +20 -9 lines changed

7 files changed

+20
-9
lines changed

docs/aws_s3_setup.md

Lines changed: 1 addition & 0 deletions
@@ -51,6 +51,7 @@ Create an IAM Policy called `MedusaStorageStrategy`, with the following definition
         "s3:GetReplicationConfiguration",
         "s3:ListMultipartUploadParts",
         "s3:PutObject",
+        "s3:PutObjectAcl",
         "s3:GetObject",
         "s3:GetObjectTorrent",
         "s3:PutObjectRetention",

medusa-example.ini

Lines changed: 3 additions & 0 deletions
@@ -71,6 +71,9 @@ concurrent_transfers = 1
 ; Size over which S3 uploads will be using the awscli with multi part uploads. Defaults to 100MB.
 multi_part_upload_threshold = 104857600
 
+; Canned ACL for uploaded objects on S3. Defaults to private
+canned_acl = private
+
 [monitoring]
 ;monitoring_provider = <Provider used for sending metrics. Currently either of "ffwd" or "local">
 
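
The value given to `canned_acl` has to be one of the canned ACL names S3 accepts for objects. For reference (see the S3 documentation for what each of these grants):

    # Canned ACLs that S3 accepts for objects; "private" is the default
    S3_OBJECT_CANNED_ACLS = (
        "private",
        "public-read",
        "public-read-write",
        "authenticated-read",
        "aws-exec-read",
        "bucket-owner-read",
        "bucket-owner-full-control",
    )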

medusa/config.py

Lines changed: 3 additions & 1 deletion
@@ -29,7 +29,8 @@
     'StorageConfig',
     ['bucket_name', 'key_file', 'prefix', 'fqdn', 'host_file_separator', 'storage_provider',
      'base_path', 'max_backup_age', 'max_backup_count', 'api_profile', 'transfer_max_bandwidth',
-     'concurrent_transfers', 'multi_part_upload_threshold', 'host', 'region', 'port', 'secure', 'aws_cli_path']
+     'concurrent_transfers', 'multi_part_upload_threshold', 'canned_acl', 'host',
+     'region', 'port', 'secure', 'aws_cli_path']
 )
 
 CassandraConfig = collections.namedtuple(

@@ -95,6 +96,7 @@ def load_config(args, config_file):
         'aws_cli_path': 'aws',
         'fqdn': socket.getfqdn(),
         'region': 'default',
+        'canned_acl': 'private',
     }
 
     config['logging'] = {

medusa/storage/aws_s3_storage/awscli.py

Lines changed: 3 additions & 1 deletion
@@ -27,6 +27,7 @@ class AwsCli(object):
     def __init__(self, storage):
         self._config = storage.config
         self.storage = storage
+        self.canned_acl = storage.config.canned_acl
 
     @property
     def bucket_name(self):

@@ -73,7 +74,8 @@ def cp_upload(self, *, srcs, bucket_name, dest, max_retries=5):
         awscli_output = "/tmp/awscli_{0}.output".format(job_id)
         objects = []
         for src in srcs:
-            cmd = [self._aws_cli_path, "s3", "cp", str(src), "s3://{}/{}".format(bucket_name, dest)]
+            cmd = [self._aws_cli_path, "s3", "cp", "--acl", self.canned_acl,
+                   str(src), "s3://{}/{}".format(bucket_name, dest)]
             objects.append(self.upload_file(cmd, dest, awscli_output))
 
         return objects
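
For files that go over the multi-part threshold and are handed to the AWS CLI, the ACL simply becomes the `--acl` option of `aws s3 cp`, which accepts the same canned ACL names. A small sketch of the command this builds (paths and bucket name are made up):

    # Mirrors the patched cp_upload() command construction
    aws_cli_path = "aws"
    canned_acl = "private"
    src = "/tmp/mc-1-big-Data.db"
    bucket_name = "my-backup-bucket"
    dest = "prefix/node1/backup1/data"

    cmd = [aws_cli_path, "s3", "cp", "--acl", canned_acl,
           src, "s3://{}/{}".format(bucket_name, dest)]
    print(" ".join(cmd))
    # -> aws s3 cp --acl private /tmp/mc-1-big-Data.db s3://my-backup-bucket/prefix/node1/backup1/data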

medusa/storage/aws_s3_storage/concurrent.py

Lines changed: 7 additions & 6 deletions
@@ -64,7 +64,7 @@ def with_storage(self, iterable):
 
 
 def upload_blobs(
-    storage, src, dest, bucket, max_workers=None, multi_part_upload_threshold=0
+    storage, src, dest, bucket, canned_acl, max_workers=None, multi_part_upload_threshold=0
 ):
     """
     Uploads a list of files from local storage concurrently to the remote storage.

@@ -80,14 +80,14 @@ def upload_blobs(
     job = StorageJob(
         storage,
         lambda storage, connection, src_file: __upload_file(
-            storage, connection, src_file, dest, bucket, multi_part_upload_threshold
+            storage, connection, src_file, dest, bucket, canned_acl, multi_part_upload_threshold
         ),
         max_workers,
     )
     return job.execute(list(src))
 
 
-def __upload_file(storage, connection, src, dest, bucket, multi_part_upload_threshold):
+def __upload_file(storage, connection, src, dest, bucket, canned_acl, multi_part_upload_threshold):
     """
     This function is called by StorageJob. It may be called concurrently by multiple threads.
 

@@ -116,15 +116,16 @@ def __upload_file(storage, connection, src, dest, bucket, multi_part_upload_threshold):
         obj = _upload_multi_part(storage, connection, src, bucket, full_object_name)
     else:
         logging.debug("Uploading {} as single part".format(full_object_name))
-        obj = _upload_single_part(connection, src, bucket, full_object_name)
+        obj = _upload_single_part(connection, src, bucket, full_object_name, canned_acl)
 
     return medusa.storage.ManifestObject(obj.name, int(obj.size), obj.hash)
 
 
 @retry(stop_max_attempt_number=MAX_UP_DOWN_LOAD_RETRIES, wait_fixed=5000)
-def _upload_single_part(connection, src, bucket, object_name):
+def _upload_single_part(connection, src, bucket, object_name, canned_acl):
+    extra = {'content_type': 'application/octet-stream', 'acl': canned_acl}
     obj = connection.upload_object(
-        str(src), container=bucket, object_name=object_name
+        str(src), container=bucket, object_name=object_name, extra=extra
    )
 
     return obj
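
Files below the threshold are uploaded in a single part through Apache Libcloud instead of the AWS CLI, so the ACL travels in the `extra` dict accepted by `upload_object`; the S3 driver sends `extra['acl']` as the `x-amz-acl` header on the request. A standalone sketch of that call against libcloud's S3 driver (credentials, bucket and object names are placeholders):

    from libcloud.storage.providers import get_driver
    from libcloud.storage.types import Provider

    driver = get_driver(Provider.S3)("ACCESS_KEY", "SECRET_KEY", region="us-east-1")
    container = driver.get_container("my-backup-bucket")

    # 'acl' must be a canned ACL name; 'content_type' becomes the Content-Type header
    extra = {"content_type": "application/octet-stream", "acl": "private"}
    obj = driver.upload_object(
        "/tmp/mc-1-big-Data.db",
        container=container,
        object_name="prefix/node1/backup1/data/mc-1-big-Data.db",
        extra=extra,
    )
    print(obj.name, obj.size, obj.hash)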

medusa/storage/s3_storage.py

Lines changed: 1 addition & 0 deletions
@@ -157,6 +157,7 @@ def upload_blobs(self, srcs, dest):
             srcs,
             dest,
             self.bucket,
+            canned_acl=self.config.canned_acl,
             max_workers=self.config.concurrent_transfers,
             multi_part_upload_threshold=int(self.config.multi_part_upload_threshold),
         )

tests/integration/features/steps/integration_steps.py

Lines changed: 2 additions & 1 deletion
@@ -426,7 +426,8 @@ def i_am_using_storage_provider_with_grpc_server(context, storage_provider, clie
         "multi_part_upload_threshold": 1 * 1024,
         "concurrent_transfers": 4,
         "prefix": storage_prefix,
-        "aws_cli_path": "aws"
+        "aws_cli_path": "aws",
+        "canned_acl": "private"
     }
 
     config["cassandra"] = {
