-
Notifications
You must be signed in to change notification settings - Fork 10
Expand file tree
/
Copy pathstorage.py
More file actions
1585 lines (1305 loc) · 58.9 KB
/
storage.py
File metadata and controls
1585 lines (1305 loc) · 58.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
from __future__ import annotations
import abc
import base64
import dataclasses
import datetime
import hashlib
import io
import os
import random
import re
import shutil
import urllib.parse
import uuid
from collections import defaultdict
from typing import (
TYPE_CHECKING,
Any,
AnyStr,
BinaryIO,
Iterable,
Iterator,
Protocol,
TextIO,
cast,
runtime_checkable,
)
import boto3
import magic
from botocore import exceptions as botocore_exceptions
from botocore.response import StreamingBody
from django.conf import settings
from django.urls import reverse
from django.utils.module_loading import import_string
from xocto import events, localtime
from . import files, s3_select
if TYPE_CHECKING:
from _typeshed import WriteableBuffer
from mypy_boto3_s3 import service_resource
from mypy_boto3_s3.client import S3Client
# This URL is returned from `fetch_url()` in cases where a legitimate URL
# cannot be returned. For example, a legitimate URL cannot be returned
# for files stored in the `MemoryFileStore` during testing.
TEST_FETCH_URL = "http://www.example.com/file.txt"
TEMP_FOLDER = "/tmp" # nosec
# The maximum length allowed for an S3 key
MAX_KEY_LENGTH = 1024
# S3 select can have a max size of 1 MB.
MAX_S3_SELECT_SIZE_RANGE = 1_048_576
# Regex for s3 URLs in the "virtual hosted" format, e.g.
# https://<bucket>.s3.<region>.amazonaws.com/<key>
# The dots are escaped so that the ".s3." and ".amazonaws.com" separators only
# match literal dots (previously `.` matched any character, so unrelated hosts
# such as "bXs3Xamazonaws.com" would also match).
S3_VIRTUAL_HOSTED_URL_RE = re.compile(
    r"https://(?P<bucket>.+)\.s3\.((?P<region>.*)\.)?amazonaws\.com/(?P<key>.+)"
)
def _should_raise_error_on_existing_files() -> bool:
"""
Check if we should error when a file is being overwritten without explicit overwrite=True.
"""
return False
def _log_existing_file_returned(filename: str) -> None:
    """
    Publish an event recording that an already-stored file was returned instead
    of being overwritten (see the `store_file`/`store_filepath` overwrite logic).
    """
    events.publish("storage.file.existing-returned", params={"filename": filename})
class FileExists(Exception):
    """Raised when storing a file would overwrite an existing key without `overwrite=True`."""
    pass
class KeyDoesNotExist(Exception):
    """Raised when the requested key is not present in the bucket."""
    pass
class BucketNotVersioned(Exception):
    """Raised when a version-dependent operation is attempted on a bucket without versioning enabled."""
    pass
class S3SelectUnexpectedResponse(Exception):
    """Raised when S3 Select responds with an error HTTP status (see `_select_object_content`)."""
    pass
@dataclasses.dataclass(frozen=True)
class PreSignedPost:
    """
    Pre-signed post url.
    For more details see:
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-presigned-urls.html#generating-a-presigned-url-to-upload-a-file
    """
    # The URL to POST the upload to.
    url: str
    # The key under which the uploaded object will be stored.
    key: str
    # Form fields that must be included with the POST request.
    fields: dict[str, Any]
@dataclasses.dataclass(frozen=True)
class S3Object:
    """A lightweight, immutable reference to an object in an S3 bucket."""
    bucket_name: str
    key: str
    # Only meaningful for buckets with versioning enabled.
    version_id: str | None = None
@runtime_checkable
class Clearable(Protocol):
    """Structural type for any object exposing a no-argument `clear()` method."""
    def clear(self) -> None:
        ...
class ReadableBinaryFile(Protocol):
    """Structural type for any object with a bytes-returning `read(size)` method."""
    def read(self, size: int = ...) -> bytes:
        ...
class StreamingBodyIOAdapter(io.RawIOBase):
    """
    Adapt a boto3 `StreamingBody` into a standard Python raw IO object.

    Boto3 returns S3 file contents as `botocore.response.StreamingBody`
    instances. These expose `read` but are not full drop-in replacements for
    objects in Python's IO hierarchy. Wrapping one in this adapter yields an
    `io.RawIOBase` that can in turn be wrapped by `io.BufferedReader` /
    `io.TextIOWrapper`, letting the storage module hand back ordinary-looking
    file objects regardless of the backing store.
    """

    def __init__(self, streaming_body: StreamingBody) -> None:
        self.streaming_body = streaming_body

    def readable(self) -> bool:
        # Required so the buffered IO layers treat this as a readable stream.
        return True

    def readinto(self, buffer: WriteableBuffer) -> int:
        """
        Fill `buffer` with up to `len(buffer)` bytes from the body.

        Mutates `buffer` in place and returns how many bytes were written
        (0 signals end-of-stream to the buffered layers).
        """
        # The superclass types this as `WriteableBuffer`, a Union that includes
        # some buffer types (e.g. pickle.PickleBuffer) lacking the `__len__` /
        # slice-assignment support we rely on below. The assert narrows the
        # type for mypy; in practice the buffered IO machinery passes
        # bytearray/memoryview objects.
        assert isinstance(buffer, (bytearray, memoryview))
        chunk = self.streaming_body.read(len(buffer))
        count = len(chunk)
        buffer[:count] = chunk
        return count

    def close(self) -> None:
        # Release the underlying HTTP connection before marking ourselves closed.
        self.streaming_body.close()
        super().close()
class BaseS3FileStore(abc.ABC):
    """
    Abstract interface for S3-style file stores.
    Concrete backends implement the abstract storage/retrieval primitives;
    shared key-path construction and download helpers live here.
    """
    # Define the interface for subclasses
    def __init__(
        self,
        bucket_name: str,
        use_date_in_key_path: bool = True,
        set_acl_bucket_owner: bool = False,
    ) -> None:
        # Name of the backing bucket.
        self.bucket_name = bucket_name
        # When true, `make_key_path` inserts today's date (YYYY/MM/DD) into key paths.
        self.date_in_key_path = use_date_in_key_path
        # When true, implementations apply a bucket-owner-full-control ACL on writes.
        self.set_acl_bucket_owner = set_acl_bucket_owner
    @abc.abstractmethod
    def store_file(
        self,
        namespace: str,
        filename: str,
        contents: AnyStr | ReadableBinaryFile,
        content_type: str = "",
        overwrite: bool = False,
        metadata: dict[str, str] | None = None,
    ) -> tuple[str, str]:
        """
        Store a file in S3 given its filename and contents. Contents should be UTF-8 encoded.

        Returns a `(bucket_name, key_path)` tuple identifying the stored object.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def store_versioned_file(
        self,
        key_path: str,
        contents: AnyStr | io.BytesIO,
        content_type: str = "",
    ) -> tuple[str, str, str]:
        """
        Store a file in S3 given its filename and contents. Contents should be UTF-8 encoded.
        The bucket must have versioning enabled.
        If the key is not known, `make_key_path` should be called to generate it.

        Returns a `(bucket_name, key_path, version_id)` tuple.
        :raises BucketNotVersioned: if the bucket does not have versioning enabled.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def store_filepath(
        self, namespace: str, filepath: str, overwrite: bool = False, dest_filepath: str = ""
    ) -> tuple[str, str]:
        """
        Store a local file (read from `filepath`) and return `(bucket_name, key_path)`.
        """
        raise NotImplementedError()
    def make_key_path(self, *, namespace: str = "", filepath: str) -> str:
        """
        Return the full file (key) path given a namespace and filepath.
        This is normally just `{namespace}/{filepath}` except when `self.use_date_in_key_path` is
        true, in which case today's date is inserted in between:
        `{namespace}/2021/07/20/{filepath}`
        Use this to determine the key path that would be returned by the `store_file*` functions
        for the same namespace and filepath.

        :raises RuntimeError: if the generated key path exceeds MAX_KEY_LENGTH characters.
        """
        if namespace:
            # Remove trailing slash to avoid creating a "directory" named "/" in the path.
            namespace = namespace.rstrip("/")
        if self.date_in_key_path:
            today = localtime.today()
            parts = [v for v in [namespace, today.strftime("%Y/%m/%d"), filepath] if v]
        else:
            parts = [v for v in [namespace, filepath] if v]
        key_path = os.path.join(*parts)
        if len(key_path) > MAX_KEY_LENGTH:
            raise RuntimeError(
                f"Generated `key_path` must not exceed {MAX_KEY_LENGTH} characters in length"
            )
        return key_path
    @abc.abstractmethod
    def get_key(self, key_path: str, version_id: str | None = None) -> S3Object:
        """
        Return an `S3Object` reference for `key_path` (and optional version).
        """
        raise NotImplementedError()
    def get_key_or_store_file(
        self,
        *,
        namespace: str = "",
        filepath: str,
        contents: bytes,
        content_type: str = "",
    ) -> tuple[tuple[str, str], bool]:
        """
        Return the full key for `namespace` and `filepath`, writing `contents` if there's nothing
        already at that key path.

        Returns `((bucket_name, key_path), created)` where `created` is True if
        the file was written by this call.

        Warning: this is open to race conditions for certain storage backends (S3) because there's
        no way to transactionally create an object at a key. E.g.:
        * Process 1 does get_key_or_store_file
        * Process 1 sees there's no file
        * Before process 1 can create the file at the key
        * Process 2 does get_key_or_store_file
        * Process 2 also sees there's no file
        * Whichever process finishes the writing to the key last wins
        """
        key_path = self.make_key_path(namespace=namespace, filepath=filepath)
        if self.exists(key_path):
            return (self.bucket_name, key_path), False
        self.store_file(
            namespace=namespace, filename=filepath, contents=contents, content_type=content_type
        )
        return (self.bucket_name, key_path), True
    @abc.abstractmethod
    def get_file_type(self, key_path: str) -> str:
        """
        Return the content type of the object at `key_path`.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def fetch_file(self, key_path: str, version_id: str | None = None) -> StreamingBody:
        """
        Return the object's contents as a botocore `StreamingBody`.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def fetch_file_contents(self, key_path: str, version_id: str | None = None) -> bytes:
        """
        Return the object's entire contents as bytes.
        """
        raise NotImplementedError()
    def fetch_text_file(
        self, key_path: str, encoding: str | None = None, errors: str | None = None
    ) -> TextIO:
        """
        Return a file from storage as a TextIO "file-like" object.
        """
        # Layer the standard buffered/text IO wrappers over the raw S3 stream.
        streaming_body = self.fetch_file(key_path)
        raw_io = StreamingBodyIOAdapter(streaming_body)
        buffered_io = io.BufferedReader(raw_io)
        return io.TextIOWrapper(buffered_io, encoding=encoding, errors=errors)
    @abc.abstractmethod
    def fetch_url(
        self,
        key_path: str,
        expires_in: int = 60,
        response_headers: dict[str, str] | None = None,
        version_id: str | None = None,
    ) -> str:
        """
        Return a URL granting time-limited access to the object at `key_path`.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def generate_presigned_post(self, *, key_path: str, expires_in: int = 60) -> PreSignedPost:
        """
        Return a `PreSignedPost` allowing a direct upload to `key_path`.
        """
        raise NotImplementedError
    @abc.abstractmethod
    def exists(self, key_path: str, as_file: bool = False) -> bool:
        """
        Return whether an object exists at `key_path`. With `as_file=True`,
        zero-length objects are treated as absent.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def list_s3_keys(self, namespace: str = "") -> Iterable[S3Object]:
        """
        Yield `S3Object` references for all keys with the `namespace` prefix.
        """
        raise NotImplementedError()
    def list_s3_keys_page( # nosec B107
        self,
        namespace: str = "",
        *,
        next_token: str = "",
        max_keys: int = 100,
    ) -> tuple[Iterable[S3Object], str]:
        """Pass-thru to list_s3_keys"""
        # Base implementation ignores `next_token`/`max_keys` and returns
        # everything as a single page (empty next-token).
        return self.list_s3_keys(namespace), ""
    @abc.abstractmethod
    def list_files(self, namespace: str = "") -> Iterable[str]:
        """
        Yield key names (excluding folder markers) with the `namespace` prefix.
        """
        raise NotImplementedError()
    def download_file(self, key_path: str) -> BinaryIO:
        """
        Download the object to a deterministic path under TEMP_FOLDER and
        return the file object.
        Note: the file object is returned after the `with` block exits, so it
        is already closed; it is only useful for attributes such as `.name`,
        and must be reopened to read the contents.
        """
        filepath = self._build_download_filepath(key_path)
        _create_parent_directories(filepath)
        with open(filepath, "wb") as f:
            f.write(self.fetch_file_contents(key_path))
        return f
    def download_to_file(self, key_path: str, file_path: str) -> None:
        """
        Download the object's contents into the local file at `file_path`.
        """
        with open(file_path, "wb") as f:
            f.write(self.fetch_file_contents(key_path))
    @abc.abstractmethod
    def get_size_in_bytes(self, *, s3_object: S3Object) -> int:
        """
        Return the object's size in bytes.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def get_last_modified(self, key_path: str) -> datetime.datetime:
        """
        Return the object's last-modified timestamp.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def copy(self, *, s3_object: S3Object, destination: str) -> S3Object:
        """
        Copy the object to `destination` in this store and return the new reference.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def rename(self, *, s3_object: S3Object, destination: str) -> S3Object:
        """
        Rename an S3 object.
        The behaviour in a versioned bucket is undefined.
        It should be defined once a clear use case has been identified.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def delete(self, *, s3_object: S3Object) -> None:
        """
        Delete an object from S3.
        If the bucket is versioned, this will only delete the specified object version.
        """
        raise NotImplementedError()
    def _build_download_filepath(self, key_path: str) -> str:
        """
        Assemble the full filename to create when downloading a file.
        Args:
            key_path (string): the S3 key of the file that will be downloaded.
        """
        return os.path.join(TEMP_FOLDER, self.bucket_name, key_path)
class S3FileStore(BaseS3FileStore):
    """
    Concrete file store backed by a real S3 bucket via boto3.
    """
    ACL_BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"
    def __init__(
        self,
        bucket_name: str,
        use_date_in_key_path: bool = True,
        set_acl_bucket_owner: bool = False,
    ) -> None:
        # Length limits per the S3 bucket naming rules.
        if not (3 <= len(bucket_name) <= 63):
            raise ValueError(
                f"`bucket_name` must be between 3 and 63 characters in length: {bucket_name}"
            )
        super().__init__(
            bucket_name,
            use_date_in_key_path=use_date_in_key_path,
            set_acl_bucket_owner=set_acl_bucket_owner,
        )
    def __str__(self) -> str:
        return f"S3 FileStore for bucket {self.bucket_name}"
    def store_file(
        self,
        namespace: str,
        filename: str,
        contents: AnyStr | ReadableBinaryFile,
        content_type: str = "",
        overwrite: bool = False,
        metadata: dict[str, str] | None = None,
    ) -> tuple[str, str]:
        """
        Store a file in S3 given its filename and contents. Contents should be UTF-8 encoded.
        Returns a `(bucket_name, key_path)` tuple.
        :raises FileExists: if a file with the given name already exists in the bucket and
            raising an exception is enabled via an env var.
        """
        key_path = self.make_key_path(namespace=namespace, filepath=filename)
        if not overwrite:
            try:
                existing_boto_object = self._get_boto_object_for_key(key=key_path)
            except KeyDoesNotExist:
                pass
            else:
                if _should_raise_error_on_existing_files():
                    raise FileExists(
                        "A file with this name already exists. Pass overwrite=True if you're sure "
                        "it's safe to overwrite the contents of the existing file."
                    )
                # Default behaviour: log the collision and return the existing
                # object untouched.
                _log_existing_file_returned(key_path)
                return self.bucket_name, existing_boto_object.key
        readable = _to_stream(contents=contents)
        # `boto_client.upload_fileobj` is type annotated with `Fileobj: BinaryIO`. However, in
        # practice the only file-like method it needs is `read(size=...)`. This cast allows us to
        # use `upload_fileobj` with any Fileobj that implements the `ReadableBinaryFile` protocol
        # but not the whole of BinaryIO. This includes, importantly, Django's `UploadedFile`.
        file_obj = cast(BinaryIO, readable)
        extra_args: dict[str, Any] = {}
        if content_type:
            extra_args["ContentType"] = content_type
        if policy := self._get_policy():
            extra_args["ACL"] = policy
        if metadata:
            extra_args["Metadata"] = metadata
        boto_client = self._get_boto_client()
        boto_client.upload_fileobj(
            Fileobj=file_obj, Bucket=self.bucket_name, Key=key_path, ExtraArgs=extra_args
        )
        return self.bucket_name, key_path
    def store_versioned_file(
        self,
        key_path: str,
        contents: AnyStr | io.BytesIO,
        content_type: str = "",
    ) -> tuple[str, str, str]:
        """
        Store a file in a versioned bucket, returning (bucket, key, version id).
        :raises BucketNotVersioned: if the bucket does not have versioning enabled.
        """
        if not self._bucket_is_versioned():
            raise BucketNotVersioned()
        file_obj = _to_stream(contents=contents)
        extra_args: dict[str, str] = {}
        if content_type:
            extra_args["ContentType"] = content_type
        if policy := self._get_policy():
            extra_args["ACL"] = policy
        boto_client = self._get_boto_client()
        # `put_object` (rather than `upload_fileobj`) is used because its
        # response includes the new object's VersionId.
        boto_response = boto_client.put_object(
            Body=file_obj, Bucket=self.bucket_name, Key=key_path, **extra_args  # type: ignore[arg-type]
        )
        version_id = boto_response["VersionId"]
        return self.bucket_name, key_path, version_id
    def store_filepath(
        self, namespace: str, filepath: str, overwrite: bool = False, dest_filepath: str = ""
    ) -> tuple[str, str]:
        """
        Store a file in S3 given its local filepath.
        Returns a `(bucket_name, key_path)` tuple.
        :raises FileExists: if a file with the given name already exists in the bucket and
            raising an exception is enabled via an env var.
        """
        if not dest_filepath:
            dest_filepath = os.path.basename(filepath)
        key_path = self.make_key_path(namespace=namespace, filepath=dest_filepath)
        if not overwrite:
            try:
                existing_boto_object = self._get_boto_object_for_key(key=key_path)
            except KeyDoesNotExist:
                pass
            else:
                if _should_raise_error_on_existing_files():
                    raise FileExists(
                        "A file with this name already exists. Pass overwrite=True if you're sure "
                        "it's safe to overwrite the contents of the existing file."
                    )
                _log_existing_file_returned(key_path)
                return self.bucket_name, existing_boto_object.key
        extra_args = {}
        if policy := self._get_policy():
            extra_args["ACL"] = policy
        boto_client = self._get_boto_client()
        boto_client.upload_file(
            Filename=filepath, Bucket=self.bucket_name, Key=key_path, ExtraArgs=extra_args
        )
        return self.bucket_name, key_path
    def get_key(self, key_path: str, version_id: str | None = None) -> S3Object:
        # Builds a reference only; existence of the key is not verified here.
        return S3Object(bucket_name=self.bucket_name, key=key_path, version_id=version_id)
    def get_file_type(self, key_path: str) -> str:
        return self._get_boto_object_for_key(key=key_path).content_type
    def fetch_file(self, key_path: str, version_id: str | None = None) -> StreamingBody:
        boto_object = self._get_boto_object_for_key(key=key_path, version_id=version_id)
        return boto_object.get()["Body"]
    def fetch_file_contents(self, key_path: str, version_id: str | None = None) -> bytes:
        return self.fetch_file(key_path, version_id).read()
    def fetch_file_contents_using_s3_select(
        self,
        key_path: str,
        raw_sql: str,
        input_serializer: s3_select.CSVInputSerializer,
        output_serializer: s3_select.CSVOutputSerializer | s3_select.JSONOutputSerializer,
        compression_type: s3_select.CompressionType,
        scan_range: s3_select.ScanRange | None = None,
        chunk_size: int | None = None,
    ) -> Iterator[str]:
        """
        Reads a CSV file from S3 using the given SQL statement.
        Reference: https://dev.to/idrisrampurawala/efficiently-streaming-a-large-aws-s3-file-via-s3-select-4on
        """
        boto_client = self._get_boto_client()
        serialization = s3_select.get_serializers_for_csv_file(
            input_serializer=input_serializer,
            compression_type=compression_type,
            output_serializer=output_serializer,
            scan_range=scan_range,
        )
        select_object_content_parameters = dict(
            Bucket=self.bucket_name,
            Key=key_path,
            ExpressionType="SQL",
            Expression=raw_sql,
            InputSerialization=serialization["input_serialization"],
            OutputSerialization=serialization["output_serialization"],
        )
        if scan_range:
            yield from self._select_object_content_using_scan_range(
                boto_client=boto_client,
                select_object_content_parameters=select_object_content_parameters,
                key_path=key_path,
                scan_range=scan_range,
                chunk_size=chunk_size,
            )
        else:
            yield from self._select_object_content(
                boto_client=boto_client,
                select_object_content_parameters=select_object_content_parameters,
            )
    def fetch_url(
        self,
        key_path: str,
        expires_in: int = 60,
        response_headers: dict[str, str] | None = None,
        version_id: str | None = None,
    ) -> str:
        """
        Return a presigned URL that grants public access to an object. A presigned URL remains
        valid for a limited period of time which is specified with `expires_in` in seconds.
        Additional response headers can be provided with `response_headers`, using keys such as
        `"ResponseContentType"` or `"ResponseContentDisposition"`.
        """
        # Copy so that we never mutate the caller's dictionary (previously the
        # `Bucket`/`Key` entries below were injected into it).
        params: dict[str, Any] = dict(response_headers) if response_headers else {}
        # Ensure these values take precedence if the keys were ever, incorrectly, already in
        # the `response_headers` dictionary.
        params.update({"Bucket": self.bucket_name, "Key": key_path})
        if version_id:
            params.update({"VersionId": version_id})
        boto_client = self._get_boto_client()
        return boto_client.generate_presigned_url(
            "get_object", Params=params, ExpiresIn=expires_in
        )
    def generate_presigned_post(self, *, key_path: str, expires_in: int = 60) -> PreSignedPost:
        """
        Return a `PreSignedPost` allowing a direct POST upload to `key_path`.
        """
        boto_client = self._get_boto_client()
        presigned_post = boto_client.generate_presigned_post(
            Bucket=self.bucket_name, Key=key_path, ExpiresIn=expires_in
        )
        return PreSignedPost(
            url=presigned_post["url"],
            fields=presigned_post["fields"],
            key=presigned_post["fields"]["key"],
        )
    def exists(self, key_path: str, as_file: bool = False) -> bool:
        """
        Return whether an object exists at `key_path`. With `as_file=True`,
        zero-length objects are treated as absent.
        """
        try:
            boto_object = self._get_boto_object_for_key(key=key_path)
        except KeyDoesNotExist:
            return False
        return not (as_file and boto_object.content_length == 0)
    def list_s3_keys(self, namespace: str = "") -> Iterable[S3Object]:
        boto_bucket = self._get_boto_bucket()
        for object_summary in boto_bucket.objects.filter(Prefix=namespace):
            yield S3Object(bucket_name=self.bucket_name, key=object_summary.key)
    def list_s3_keys_page(  # nosec B107
        self,
        namespace: str = "",
        *,
        next_token: str = "",
        max_keys: int = 100,
    ) -> tuple[Iterable[S3Object], str]:
        """
        Lists subset of files in the S3 bucket, optionally limited to only those within a given
        namespace. S3 keys ending with a forward slash are excluded, on the assumption that they
        are folders, not files.
        :param namespace: Limits keys to only those with this prefix
        :param next_token: next_token supplied from the last returned result
        :param max_keys: the number of keys to request from the bucket (<1000)
        :return: Tuple of generator of key names, and next token for next page
        """
        client = self._get_boto_client()
        # Not using ContinuationToken as ContinuationToken is tied to a specific client
        response = client.list_objects_v2(
            Bucket=self.bucket_name,
            Prefix=namespace,
            MaxKeys=max_keys,
            StartAfter=next_token,
        )
        keys = [item["Key"] for item in response.get("Contents") or []]
        # Guard on `keys` so an empty (but truncated) page cannot raise IndexError.
        next_token = keys[-1] if response["IsTruncated"] and keys else ""
        # The next token must be computed before dropping folder markers, as
        # pagination continues from the last key S3 actually returned.
        keys = [key for key in keys if not key.endswith("/")]
        objects = [S3Object(bucket_name=self.bucket_name, key=key) for key in keys]
        return objects, next_token
    def list_files(self, namespace: str = "") -> Iterable[str]:
        """
        Lists all files in the S3 bucket, optionally limited to only those within a given
        namespace. S3 keys ending with a forward slash are excluded, on the assumption that they
        are folders, not files.
        :param namespace: Limits keys to only those with this prefix
        :return: Generator of key names
        """
        boto_bucket = self._get_boto_bucket()
        for object_summary in boto_bucket.objects.filter(Prefix=namespace):
            if not object_summary.key.endswith("/"):  # filter out folders
                yield object_summary.key
    def download_file(self, key_path: str) -> BinaryIO:
        """
        A more efficient version of the superclass's method.
        Note: as in the superclass, the returned file object is already closed
        by the time it is returned; reopen it to read the contents.
        """
        boto_object = self._get_boto_object_for_key(key=key_path)
        filepath = self._build_download_filepath(key_path)
        _create_parent_directories(filepath)
        with open(filepath, "wb") as f:
            boto_object.download_fileobj(Fileobj=f)
        return f
    def download_to_file(self, key_path: str, file_path: str) -> None:
        boto_object = self._get_boto_object_for_key(key=key_path)
        boto_object.download_file(Filename=file_path)
    def get_size_in_bytes(self, *, s3_object: S3Object) -> int:
        boto_object = self._get_boto_object(s3_object=s3_object)
        return boto_object.content_length
    def get_last_modified(self, key_path: str) -> datetime.datetime:
        boto_object = self._get_boto_object_for_key(key=key_path)
        return boto_object.last_modified
    def copy(self, *, s3_object: S3Object, destination: str) -> S3Object:
        """
        Copy the object to `destination` within this bucket.
        """
        extra_args = {}
        if policy := self._get_policy():
            extra_args["ACL"] = policy
        dest_boto_object = self._get_boto_bucket().Object(destination)
        dest_boto_object.copy(
            CopySource={"Bucket": s3_object.bucket_name, "Key": s3_object.key},
            ExtraArgs=extra_args,
        )
        return S3Object(bucket_name=self.bucket_name, key=destination)
    def rename(self, *, s3_object: S3Object, destination: str) -> S3Object:
        """
        Rename an object by copying it to `destination` and deleting the original.
        """
        src_boto_object = self._get_boto_object(s3_object=s3_object)
        extra_args = {}
        if policy := self._get_policy():
            extra_args["ACL"] = policy
        dest_boto_object = self._get_boto_bucket().Object(destination)
        dest_boto_object.copy(
            CopySource={"Bucket": s3_object.bucket_name, "Key": s3_object.key},
            ExtraArgs=extra_args,
        )
        if src_boto_object.key != dest_boto_object.key:
            # Only delete the old file if the source and destination are different. Otherwise
            # renaming a file to its own path would delete it.
            src_boto_object.delete()
        return S3Object(bucket_name=self.bucket_name, key=destination)
    def delete(self, *, s3_object: S3Object) -> None:
        boto_object = self._get_boto_object(s3_object=s3_object)
        boto_object.delete()
    # Private
    def _get_policy(self) -> str | None:
        if self.set_acl_bucket_owner:
            # If the storage class is configured to, we will set the ACL of keys that we
            # set content on to have the policy "bucket-owner-full-control".
            # This is useful when writing to a bucket where the owner is an external AWS account.
            # NB: You will need PutObjectAcl permissions on the bucket in order to set the ACL.
            return self.ACL_BUCKET_OWNER_FULL_CONTROL
        return None
    def _get_boto_client(self) -> S3Client:
        return boto3.client(
            "s3", region_name=settings.AWS_REGION, endpoint_url=settings.AWS_S3_ENDPOINT_URL
        )
    def _get_boto_bucket(self) -> service_resource.Bucket:
        # NOTE(review): unlike `_get_boto_client`, this does not pass
        # `settings.AWS_S3_ENDPOINT_URL` — confirm whether resource-based calls
        # should also honour the custom endpoint.
        boto_resource = boto3.resource("s3", region_name=settings.AWS_REGION)
        return boto_resource.Bucket(self.bucket_name)
    def _bucket_is_versioned(self) -> bool:
        boto_client = self._get_boto_client()
        versioning_info = boto_client.get_bucket_versioning(Bucket=self.bucket_name)
        # `Status` is omitted entirely for buckets where versioning has never
        # been configured; use .get() so that case is False rather than KeyError.
        return versioning_info.get("Status") == "Enabled"
    def _get_boto_object(self, *, s3_object: S3Object) -> service_resource.Object:
        """
        Resolve an `S3Object` reference to a loaded boto3 Object.
        :raises KeyDoesNotExist: if nothing exists at the referenced key.
        :raises BucketNotVersioned: if a version is requested on an unversioned bucket.
        """
        assert s3_object.bucket_name == self.bucket_name, (
            f"Expected an S3Object from the '{self.bucket_name}' store, "
            f"but it's from the '{s3_object.bucket_name}' store"
        )
        boto_object = self._get_boto_bucket().Object(key=s3_object.key)
        if s3_object.version_id:
            if not self._bucket_is_versioned():
                raise BucketNotVersioned()
            boto_object.version_id = s3_object.version_id
        try:
            # Calls `S3.Client.head_object()` to fetch the object's attributes; e.g. its size.
            boto_object.load()
        except botocore_exceptions.ClientError as error:
            if error.response.get("Error", {}).get("Code", "") == "404":
                raise KeyDoesNotExist("Key with path %s was not found" % s3_object.key)
            raise
        return boto_object
    def _get_boto_object_for_key(
        self, *, key: str, version_id: str | None = None
    ) -> service_resource.Object:
        # Note: It looks like it can be DRYed up with a call to self.get_key().
        # *This is not safe.* It cannot be DRYed like this.
        # It'd cause the S3SubdirectoryFileStore to add the subdir to the path, which is
        # not safe since the key may have come from somewhere that already includes this.
        return self._get_boto_object(
            s3_object=S3Object(bucket_name=self.bucket_name, key=key, version_id=version_id)
        )
    def _select_object_content(
        self,
        *,
        boto_client: S3Client,
        select_object_content_parameters: dict[str, Any],
    ) -> Iterator[str]:
        """
        Run one SelectObjectContent call and yield decoded record payloads.
        :raises S3SelectUnexpectedResponse: on an error HTTP status from S3 Select.
        """
        # Error codes reference: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList
        invalid_response_statuses = [400, 401, 403, 500]
        try:
            response = boto_client.select_object_content(
                **select_object_content_parameters,
            )
        except botocore_exceptions.ClientError as error:
            if (
                error.response.get("Error", {}).get("HTTPStatusCode", None)
                in invalid_response_statuses
            ):
                raise S3SelectUnexpectedResponse("Received invalid response from S3 Select")
            raise
        if response["ResponseMetadata"]["HTTPStatusCode"] in invalid_response_statuses:
            raise S3SelectUnexpectedResponse("Received invalid response from S3 Select")
        # The payload is an event stream; only "Records" events carry data.
        for event_stream in response["Payload"]:
            if records := event_stream.get("Records"):  # type:ignore [attr-defined]
                yield records["Payload"].decode("utf-8")
    def _select_object_content_using_scan_range(
        self,
        *,
        boto_client: S3Client,
        select_object_content_parameters: dict[str, Any],
        key_path: str,
        scan_range: s3_select.ScanRange,
        chunk_size: int | None = None,
    ) -> Iterator[str]:
        """
        Performs SQL queries on S3 objects (CSV/JSON) using the given offsets (Scan Range)
        """
        file_size = self.get_size_in_bytes(
            s3_object=S3Object(bucket_name=self.bucket_name, key=key_path)
        )
        start_range = scan_range.Start if scan_range.Start else 0
        if chunk_size:
            # Each chunk is capped at the 1 MB S3 Select scan-range maximum.
            chunk_size = min(chunk_size, MAX_S3_SELECT_SIZE_RANGE)
            end_range = scan_range.End if scan_range.End else min(chunk_size, file_size)
            # NOTE(review): when chunking, `scan_range.End` only seeds the first
            # chunk; the loop then continues to the end of the file — confirm
            # this is the intended semantics.
            while start_range < file_size:
                yield from self._select_object_content(
                    boto_client=boto_client,
                    select_object_content_parameters=dict(
                        **select_object_content_parameters,
                        ScanRange=dataclasses.asdict(
                            s3_select.ScanRange(Start=start_range, End=end_range)
                        ),
                    ),
                )
                start_range = end_range
                end_range = end_range + min(chunk_size, file_size - end_range)
        else:
            end_range = scan_range.End if scan_range.End else file_size
            if (end_range - start_range) > MAX_S3_SELECT_SIZE_RANGE:
                raise ValueError(
                    f"The difference between the start range and end range should be less than 1 MB ({MAX_S3_SELECT_SIZE_RANGE} bytes)."
                )
            yield from self._select_object_content(
                boto_client=boto_client,
                select_object_content_parameters=dict(
                    **select_object_content_parameters,
                    ScanRange=dataclasses.asdict(
                        s3_select.ScanRange(Start=start_range, End=end_range)
                    ),
                ),
            )
class S3SubdirectoryFileStore(S3FileStore):
    """
    A S3FileStore which can expose just a given subdirectory rather than a whole bucket.
    """
    def __init__(self, uri: str) -> None:
        """
        :param uri: an "s3://bucket/sub/dir" style URI. Presence of
            "set_acl_bucket_owner" / "use_date_in_key_path" in the query string
            enables the corresponding behaviour.
        :raises ValueError: if the URI has the wrong scheme or no bucket name.
        """
        parsed_url = urllib.parse.urlparse(uri)
        if parsed_url.scheme != "s3":
            raise ValueError(f"Expected URL starting 's3://'. Got {uri!r}")
        if not parsed_url.netloc:
            raise ValueError(f"Expected S3 URL including a bucket name. Got {uri!r}")
        # NOTE(review): these are substring checks against the raw query string,
        # so e.g. "?use_date_in_key_path=false" still enables the flag — confirm
        # this is intended.
        set_acl_bucket_owner = "set_acl_bucket_owner" in parsed_url.query
        use_date_in_key_path = "use_date_in_key_path" in parsed_url.query
        self.bucket_name = parsed_url.netloc
        # Subdirectory prefix within the bucket; "" when the URI has no path.
        self.path = parsed_url.path.strip("/")
        super().__init__(
            self.bucket_name,
            use_date_in_key_path=use_date_in_key_path,
            set_acl_bucket_owner=set_acl_bucket_owner,
        )
    def make_key_path(self, *, namespace: str = "", filepath: str) -> str:
        """
        As the superclass, but with the subdirectory prepended to the namespace.
        """
        if self.path:
            namespace = os.path.join(self.path, namespace) if namespace else self.path
        return super().make_key_path(namespace=namespace, filepath=filepath)
    def get_key(self, key_path: str, version_id: str | None = None) -> S3Object:
        if self.path:
            key_path = os.path.join(self.path, key_path)
        return super().get_key(key_path, version_id)
    def fetch_url(
        self,
        key_path: str,
        expires_in: int = 60,
        response_headers: dict[str, str] | None = None,
        version_id: str | None = None,
    ) -> str:
        if self.path:
            key_path = os.path.join(self.path, key_path)
        return super().fetch_url(
            key_path=key_path,
            expires_in=expires_in,
            response_headers=response_headers,
            version_id=version_id,
        )
    def list_s3_keys(self, namespace: str = "") -> Iterable[S3Object]:
        if self.path:
            namespace = os.path.join(self.path, namespace)
        return super().list_s3_keys(namespace)
    def list_s3_keys_page(
        self,
        namespace: str = "",
        *,
        next_token: str = "",
        max_keys: int = 100,
    ) -> tuple[Iterable[S3Object], str]:
        """
        As the superclass, but keys in the returned `S3Object`s are relative to
        the subdirectory.
        """
        if self.path:
            namespace = os.path.join(self.path, namespace)
        objects, next_token = super().list_s3_keys_page(
            namespace, next_token=next_token, max_keys=max_keys
        )
        if self.path:
            # Strip the "<subdir>/" prefix so callers see relative keys.
            objects = [
                dataclasses.replace(obj, key=obj.key[len(self.path) + 1 :])
                for obj in objects
            ]
        return objects, next_token
    def list_files(self, namespace: str = "") -> Iterable[str]:
        """
        As the superclass, but yielded key names are relative to the subdirectory.
        """
        if self.path:
            namespace = os.path.join(self.path, namespace)
        full_paths = super().list_files(namespace)
        if self.path:
            # Strip the "<subdir>/" prefix so callers see relative keys.
            prefix_length = len(self.path) + 1
            yield from (path[prefix_length:] for path in full_paths)
        else:
            # Bug fix: the prefix-stripping above used to run unconditionally,
            # chopping the first character off every key when `self.path` was "".
            yield from full_paths
    def copy(self, *, s3_object: S3Object, destination: str) -> S3Object:
        if self.path:
            destination = os.path.join(self.path, destination)
        return super().copy(s3_object=s3_object, destination=destination)
    def rename(self, *, s3_object: S3Object, destination: str) -> S3Object:
        if self.path:
            destination = os.path.join(self.path, destination)
        return super().rename(s3_object=s3_object, destination=destination)
    # Private
    def _get_boto_object_for_key(
        self, *, key: str, version_id: str | None = None
    ) -> service_resource.Object:
        # Keys may arrive either relative to the subdirectory or already fully
        # qualified; only prepend the subdirectory when it is missing.
        if self.path and not key.startswith(self.path):
            key = os.path.join(self.path, key)
        return super()._get_boto_object_for_key(key=key, version_id=version_id)
class LocalStorageBucket:
    """
    S3 bucket for local development.
    """
    # Static-files URL path under which the sample documents below live.
    document_url_base = "/static/support/sample-files/sample-documents"
    # Sample document filenames available under `document_url_base`.
    document_filenames = ["sample.pdf"]
class LocalFileStore(BaseS3FileStore):
"""
For local development.
"""
def __init__(
self,
bucket_name: str,
storage_root: str = "",
use_date_in_key_path: bool = True,
set_acl_bucket_owner: bool = False,
) -> None:
# This is taken from https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
if not (3 <= len(bucket_name) <= 63):