-
Notifications
You must be signed in to change notification settings - Fork 2.3k
/
Copy path: staticHandler.ts
111 lines (94 loc) · 3.37 KB
/
staticHandler.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
import type * as AWS from '@aws-sdk/client-s3'
import type { StaticHandler } from '@payloadcms/plugin-cloud-storage/types'
import type { CollectionConfig } from 'payload'
import type { Readable } from 'stream'
import { getFilePrefix } from '@payloadcms/plugin-cloud-storage/utilities'
import path from 'path'
/** Dependencies required to build the static file handler for an upload collection. */
interface Args {
  /** Name of the S3 bucket the collection's files are read from. */
  bucket: string
  /** The Payload collection whose uploads this handler serves (used for prefix + response-header hooks). */
  collection: CollectionConfig
  /** Factory returning a configured S3 client; called per request so credentials can be refreshed upstream. */
  getStorageClient: () => AWS.S3
}
// Type guard for NodeJS.Readable streams: a non-null object exposing
// both `pipe` and `destroy` as functions (duck-typed, since the S3 SDK's
// Body union also includes web ReadableStream and Blob).
const isNodeReadableStream = (body: unknown): body is Readable => {
  if (typeof body !== 'object' || body === null) {
    return false
  }
  const candidate = body as { destroy?: unknown; pipe?: unknown }
  return typeof candidate.pipe === 'function' && typeof candidate.destroy === 'function'
}
// Tear down the response body's underlying socket, if the body is a Node
// readable stream. Safe to call with an undefined/partial GetObject result.
const destroyStream = (object: AWS.GetObjectOutput | undefined) => {
  const body = object?.Body
  if (body && isNodeReadableStream(body)) {
    body.destroy()
  }
}
// Convert a stream into a promise that resolves with a Buffer
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const streamToBuffer = async (readableStream: any) => {
  const collected: Buffer[] = []
  for await (const part of readableStream) {
    // String chunks are re-encoded; binary chunks are kept as-is.
    collected.push(typeof part === 'string' ? Buffer.from(part) : part)
  }
  return Buffer.concat(collected)
}
/**
 * Builds the StaticHandler that serves an upload collection's files
 * straight out of S3: resolves the object key, fetches the object,
 * answers conditional (If-None-Match) requests with 304, and otherwise
 * buffers the body fully into memory before responding with 200.
 *
 * Errors are logged via `req.payload.logger` and surfaced as a plain 500.
 * The S3 body stream is always destroyed in `finally` so the socket is
 * released on every exit path (404, 304, 200, or thrown error).
 */
export const getHandler = ({ bucket, collection, getStorageClient }: Args): StaticHandler => {
  return async (req, { headers: incomingHeaders, params: { clientUploadContext, filename } }) => {
    // Kept in outer scope so `finally` can destroy the body stream even if
    // an error is thrown after the getObject call succeeds.
    let object: AWS.GetObjectOutput | undefined = undefined
    try {
      // Key = collection-derived prefix + filename, joined with POSIX
      // separators regardless of host OS (S3 keys always use '/').
      const prefix = await getFilePrefix({ clientUploadContext, collection, filename, req })
      const key = path.posix.join(prefix, filename)
      object = await getStorageClient().getObject({
        Bucket: bucket,
        Key: key,
      })
      // No body on the S3 response is treated as a missing file.
      if (!object.Body) {
        return new Response(null, { status: 404, statusText: 'Not Found' })
      }
      // NOTE(review): `append` adds to any same-named header already present in
      // `incomingHeaders` rather than replacing it, and `String(...)` renders
      // absent S3 metadata as the literal "undefined" — presumably the S3
      // response always carries these fields; verify if duplicates/placeholder
      // values ever reach clients.
      let headers = new Headers(incomingHeaders)
      headers.append('Content-Length', String(object.ContentLength))
      headers.append('Content-Type', String(object.ContentType))
      headers.append('Accept-Ranges', String(object.AcceptRanges))
      headers.append('ETag', String(object.ETag))
      // Conditional-request support: an 'etag' header is checked first,
      // falling back to the standard 'if-none-match'.
      const etagFromHeaders = req.headers.get('etag') || req.headers.get('if-none-match')
      const objectEtag = object.ETag
      // Give the collection's upload config a chance to rewrite response
      // headers; falls back to the original headers if the hook returns
      // a falsy value.
      if (
        collection.upload &&
        typeof collection.upload === 'object' &&
        typeof collection.upload.modifyResponseHeaders === 'function'
      ) {
        headers = collection.upload.modifyResponseHeaders({ headers }) || headers
      }
      // Client already holds the current version — short-circuit with 304
      // (empty body; the unread S3 stream is destroyed in `finally`).
      if (etagFromHeaders && etagFromHeaders === objectEtag) {
        return new Response(null, {
          headers,
          status: 304,
        })
      }
      // On error, manually destroy stream to close socket
      if (object.Body && isNodeReadableStream(object.Body)) {
        const stream = object.Body
        stream.on('error', (err) => {
          req.payload.logger.error({
            err,
            key,
            msg: 'Error streaming S3 object, destroying stream',
          })
          stream.destroy()
        })
      }
      // Whole object is buffered in memory before responding — fine for
      // typical uploads, but large files pay the full allocation here.
      const bodyBuffer = await streamToBuffer(object.Body)
      return new Response(bodyBuffer, {
        headers,
        status: 200,
      })
    } catch (err) {
      req.payload.logger.error(err)
      return new Response('Internal Server Error', { status: 500 })
    } finally {
      // Runs on every path; no-op if the body was already fully consumed
      // or is not a Node readable stream.
      destroyStream(object)
    }
  }
}