
Commit 50580a2

@tus/s3-store: migrate to V3 SDK (#454)

1 parent a0459f8

File tree

5 files changed: +1361 −345 lines
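The mechanical change repeated throughout this diff is the SDK call style: the v2 `aws-sdk` client returns a request object that must be chained with `.promise()`, whereas the modular v3 `@aws-sdk/client-s3` client returns promises directly. A minimal sketch of the difference (bucket and key are placeholders, not values from this commit):

```ts
import {S3} from '@aws-sdk/client-s3'

const client = new S3({region: 'us-east-1'})

async function example() {
  // v2 (aws-sdk): const res = await client.headObject({...}).promise()
  // v3 (@aws-sdk/client-s3): the call itself returns a promise.
  const res = await client.headObject({
    Bucket: 'my-bucket', // placeholder
    Key: 'my-key.info', // placeholder
  })
  return res.Metadata
}
```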

packages/s3-store/README.md

Lines changed: 4 additions & 2 deletions
```diff
@@ -37,8 +37,10 @@ const s3Store = new S3Store({
   s3ClientConfig: {
     bucket: process.env.AWS_BUCKET,
     region: process.env.AWS_REGION,
-    accessKeyId: process.env.AWS_ACCESS_KEY_ID,
-    secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
+    credentials: {
+      accessKeyId: process.env.AWS_ACCESS_KEY_ID,
+      secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
+    },
   },
 })
 const server = new Server({path: '/files', datastore: s3Store})
```
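In the v3 SDK, static keys must be nested under `credentials`; the flat `accessKeyId`/`secretAccessKey` fields accepted by v2 are no longer part of the client config. If `credentials` is omitted, v3 falls back to its default credential provider chain (environment variables, shared config files, IAM roles), so a configuration along these lines should also work (a sketch, not from this commit):

```ts
import {S3Store} from '@tus/s3-store'

// Relies on ambient AWS credentials: env vars, shared config, or an IAM role.
const s3Store = new S3Store({
  s3ClientConfig: {
    bucket: process.env.AWS_BUCKET as string,
    region: process.env.AWS_REGION,
  },
})
```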

packages/s3-store/index.ts

Lines changed: 100 additions & 102 deletions
```diff
@@ -4,16 +4,16 @@ import stream from 'node:stream/promises'
 import type {Readable} from 'node:stream'
 import http from 'node:http'
 
-import aws from 'aws-sdk'
+import AWS, {S3, S3ClientConfig} from '@aws-sdk/client-s3'
 import debug from 'debug'
 
 import {DataStore, StreamSplitter, Upload} from '@tus/server'
 import {ERRORS, TUS_RESUMABLE} from '@tus/server'
 
 const log = debug('tus-node-server:stores:s3store')
 
-function calcOffsetFromParts(parts?: aws.S3.Parts) {
-  // @ts-expect-error object is not possibly undefined
+function calcOffsetFromParts(parts?: Array<AWS.Part>) {
+  // @ts-expect-error not undefined
   return parts && parts.length > 0 ? parts.reduce((a, b) => a + b.Size, 0) : 0
 }
 
```
```diff
@@ -23,10 +23,14 @@ type Options = {
   // but may increase it to not exceed the S3 10K parts limit.
   partSize?: number
   // Options to pass to the AWS S3 SDK.
-  s3ClientConfig: aws.S3.Types.ClientConfiguration & {bucket: string}
+  s3ClientConfig: S3ClientConfig & {bucket: string}
 }
 
-type MetadataValue = {file: Upload; 'upload-id': string; 'tus-version': string}
+type MetadataValue = {
+  file: Upload
+  'upload-id': string
+  'tus-version': string
+}
 // Implementation (based on https://github.com/tus/tusd/blob/master/s3store/s3store.go)
 //
 // Once a new tus upload is initiated, multiple objects in S3 are created:
```
```diff
@@ -63,7 +67,7 @@ type MetadataValue = {file: Upload; 'upload-id': string; 'tus-version': string}
 export class S3Store extends DataStore {
   private bucket: string
   private cache: Map<string, MetadataValue> = new Map()
-  private client: aws.S3
+  private client: S3
   private preferredPartSize: number
   public maxMultipartParts = 10_000 as const
   public minPartSize = 5_242_880 as const // 5MB
```
```diff
@@ -80,7 +84,7 @@ export class S3Store extends DataStore {
     ]
     this.bucket = bucket
     this.preferredPartSize = partSize || 8 * 1024 * 1024
-    this.client = new aws.S3(restS3ClientConfig)
+    this.client = new S3(restS3ClientConfig)
   }
 
   /**
```
```diff
@@ -91,18 +95,16 @@ export class S3Store extends DataStore {
    */
   private async saveMetadata(upload: Upload, uploadId: string) {
     log(`[${upload.id}] saving metadata`)
-    await this.client
-      .putObject({
-        Bucket: this.bucket,
-        Key: `${upload.id}.info`,
-        Body: '',
-        Metadata: {
-          file: JSON.stringify(upload),
-          'upload-id': uploadId,
-          'tus-version': TUS_RESUMABLE,
-        },
-      })
-      .promise()
+    await this.client.putObject({
+      Bucket: this.bucket,
+      Key: `${upload.id}.info`,
+      Body: '',
+      Metadata: {
+        file: JSON.stringify(upload),
+        'upload-id': uploadId,
+        'tus-version': TUS_RESUMABLE,
+      },
+    })
     log(`[${upload.id}] metadata file saved`)
   }
 
```
```diff
@@ -120,9 +122,10 @@ export class S3Store extends DataStore {
     }
 
     log(`[${id}] metadata from s3`)
-    const {Metadata} = await this.client
-      .headObject({Bucket: this.bucket, Key: `${id}.info`})
-      .promise()
+    const {Metadata} = await this.client.headObject({
+      Bucket: this.bucket,
+      Key: `${id}.info`,
+    })
     const file = JSON.parse(Metadata?.file as string)
     this.cache.set(id, {
       ...Metadata,
```
```diff
@@ -155,15 +158,13 @@ export class S3Store extends DataStore {
     readStream: fs.ReadStream | Readable,
     partNumber: number
   ): Promise<string> {
-    const data = await this.client
-      .uploadPart({
-        Bucket: this.bucket,
-        Key: metadata.file.id,
-        UploadId: metadata['upload-id'],
-        PartNumber: partNumber,
-        Body: readStream,
-      })
-      .promise()
+    const data = await this.client.uploadPart({
+      Bucket: this.bucket,
+      Key: metadata.file.id,
+      UploadId: metadata['upload-id'],
+      PartNumber: partNumber,
+      Body: readStream,
+    })
     log(`[${metadata.file.id}] finished uploading part #${partNumber}`)
     return data.ETag as string
   }
```
```diff
@@ -172,30 +173,26 @@ export class S3Store extends DataStore {
     id: string,
     readStream: fs.ReadStream | Readable
   ): Promise<string> {
-    const data = await this.client
-      .putObject({
-        Bucket: this.bucket,
-        Key: id,
-        Body: readStream,
-      })
-      .promise()
+    const data = await this.client.putObject({
+      Bucket: this.bucket,
+      Key: id,
+      Body: readStream,
+    })
     return data.ETag as string
   }
 
-  private async getIncompletePart(id: string): Promise<Buffer | undefined> {
+  private async getIncompletePart(id: string) {
     try {
-      const data = await this.client
-        .getObject({
-          Bucket: this.bucket,
-          Key: id,
-        })
-        .promise()
-      return data.Body as Buffer
+      const data = await this.client.getObject({
+        Bucket: this.bucket,
+        Key: id,
+      })
+      return data.Body?.transformToByteArray()
     } catch (error) {
       if (
-        error.code === 'NoSuchKey' ||
-        error.code === 'NoSuchUpload' ||
-        error.code === 'AccessDenied'
+        error.Code === 'NoSuchKey' ||
+        error.Code === 'NoSuchUpload' ||
+        error.Code === 'AccessDenied'
       ) {
         return undefined
       }
```
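The `getIncompletePart` change reflects a v3 behavioral difference: v2's `getObject` buffered the payload and exposed `data.Body` as a `Buffer`, while v3 returns `Body` as a streaming payload with helpers such as `transformToByteArray()` and `transformToString()`, which is why the method now resolves to a `Uint8Array`. A self-contained sketch of consuming a v3 body (bucket and key are placeholders):

```ts
import {S3} from '@aws-sdk/client-s3'

async function readObjectBytes(client: S3): Promise<Uint8Array | undefined> {
  const data = await client.getObject({
    Bucket: 'my-bucket', // placeholder
    Key: 'incomplete-part', // placeholder
  })
  // v3 Body is a stream; transformToByteArray() buffers it into memory.
  return data.Body?.transformToByteArray()
}
```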
```diff
@@ -205,18 +202,19 @@ export class S3Store extends DataStore {
   }
 
   private async deleteIncompletePart(id: string): Promise<void> {
-    await this.client
-      .deleteObject({
-        Bucket: this.bucket,
-        Key: id,
-      })
-      .promise()
+    await this.client.deleteObject({
+      Bucket: this.bucket,
+      Key: id,
+    })
   }
 
-  private async prependIncompletePart(path: string, buffer: Buffer): Promise<void> {
-    const part = await fsProm.readFile(path, 'utf8')
-    buffer.write(part, buffer.length - 1)
-    await fsProm.writeFile(path, buffer)
+  private async prependIncompletePart(
+    newChunkPath: string,
+    previousIncompletePart: Uint8Array
+  ): Promise<void> {
+    const newChunk = await fsProm.readFile(newChunkPath)
+    const combined = Buffer.concat([previousIncompletePart, newChunk])
+    await fsProm.writeFile(newChunkPath, combined)
   }
 
   /**
```
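Beyond the SDK swap, the rewritten `prependIncompletePart` replaces the old in-place `buffer.write` with an explicit read, `Buffer.concat`, write-back sequence, so the previously uploaded incomplete part is reliably prepended to the new chunk on disk. The operation in isolation (function name and arguments are illustrative):

```ts
import fsProm from 'node:fs/promises'

// Prepend previously-uploaded bytes to a chunk file on disk.
async function prependToChunk(chunkPath: string, previous: Uint8Array): Promise<void> {
  const chunk = await fsProm.readFile(chunkPath)
  await fsProm.writeFile(chunkPath, Buffer.concat([previous, chunk]))
}
```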
```diff
@@ -307,22 +305,20 @@
    * Completes a multipart upload on S3.
    * This is where S3 concatenates all the uploaded parts.
    */
-  private async finishMultipartUpload(metadata: MetadataValue, parts: aws.S3.Parts) {
-    const response = await this.client
-      .completeMultipartUpload({
-        Bucket: this.bucket,
-        Key: metadata.file.id,
-        UploadId: metadata['upload-id'],
-        MultipartUpload: {
-          Parts: parts.map((part) => {
-            return {
-              ETag: part.ETag,
-              PartNumber: part.PartNumber,
-            }
-          }),
-        },
-      })
-      .promise()
+  private async finishMultipartUpload(metadata: MetadataValue, parts: Array<AWS.Part>) {
+    const response = await this.client.completeMultipartUpload({
+      Bucket: this.bucket,
+      Key: metadata.file.id,
+      UploadId: metadata['upload-id'],
+      MultipartUpload: {
+        Parts: parts.map((part) => {
+          return {
+            ETag: part.ETag,
+            PartNumber: part.PartNumber,
+          }
+        }),
+      },
+    })
     return response.Location
   }
 
```
```diff
@@ -332,21 +328,27 @@
    */
   private async retrieveParts(
     id: string,
-    partNumberMarker?: number
-  ): Promise<aws.S3.Parts | undefined> {
-    const params: aws.S3.ListPartsRequest = {
+    partNumberMarker?: string
+  ): Promise<Array<AWS.Part> | undefined> {
+    const params: AWS.ListPartsCommandInput = {
       Bucket: this.bucket,
       Key: id,
-      UploadId: this.cache.get(id)?.['upload-id'] as string,
+      UploadId: this.cache.get(id)?.['upload-id'],
     }
     if (partNumberMarker) {
       params.PartNumberMarker = partNumberMarker
     }
 
-    const data = await this.client.listParts(params).promise()
-    if (data.NextPartNumberMarker) {
+    const data = await this.client.listParts(params)
+
+    // INFO: NextPartNumberMarker should be undefined when there are no more parts to retrieve,
+    // instead it keeps giving `0` so to prevent an infinite loop we check the number.
+    if (data.NextPartNumberMarker && Number(data.NextPartNumberMarker) > 0) {
       return this.retrieveParts(id, data.NextPartNumberMarker).then((parts) => {
-        return [...(data.Parts as aws.S3.Parts), ...(parts as aws.S3.Parts)]
+        if (parts && data.Parts) {
+          return [...data.Parts, ...parts]
+        }
+        return data.Parts
       })
     }
```
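Two details here: v3 types `PartNumberMarker` and `NextPartNumberMarker` as strings rather than numbers, and, as the INFO comment notes, some responses report a marker of `0` instead of `undefined` once the listing is exhausted, hence the `Number(...) > 0` guard before recursing. The same pagination could also be written iteratively; a sketch under the same assumptions:

```ts
import {S3, Part} from '@aws-sdk/client-s3'

// Collect every part of a multipart upload, guarding against the `0` marker.
async function listAllParts(
  client: S3,
  bucket: string,
  key: string,
  uploadId: string
): Promise<Part[]> {
  const parts: Part[] = []
  let marker: string | undefined
  do {
    const data = await client.listParts({
      Bucket: bucket,
      Key: key,
      UploadId: uploadId,
      PartNumberMarker: marker,
    })
    parts.push(...(data.Parts ?? []))
    marker =
      data.NextPartNumberMarker && Number(data.NextPartNumberMarker) > 0
        ? data.NextPartNumberMarker
        : undefined
  } while (marker)
  return parts
}
```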

```diff
@@ -398,7 +400,7 @@ export class S3Store extends DataStore {
    */
   public async create(upload: Upload) {
     log(`[${upload.id}] initializing multipart upload`)
-    type CreateRequest = Omit<aws.S3.Types.CreateMultipartUploadRequest, 'Metadata'> & {
+    type CreateRequest = Omit<AWS.CreateMultipartUploadCommandInput, 'Metadata'> & {
       Metadata: Record<string, string>
     }
     const request: CreateRequest = {
```
```diff
@@ -426,7 +428,7 @@
     // TODO: rename `file` to `upload` to align with the codebase
     request.Metadata.file = JSON.stringify(file)
 
-    const res = await this.client.createMultipartUpload(request).promise()
+    const res = await this.client.createMultipartUpload(request)
     log(`[${upload.id}] multipart upload created (${res.UploadId})`)
     await this.saveMetadata(upload, res.UploadId as string)
 
```
```diff
@@ -459,7 +461,7 @@
     if (metadata.file.size === newOffset) {
       try {
         const parts = await this.retrieveParts(id)
-        await this.finishMultipartUpload(metadata, parts as aws.S3.Parts)
+        await this.finishMultipartUpload(metadata, parts as Array<AWS.Part>)
         this.clearCache(id)
       } catch (error) {
         log(`[${id}] failed to finish upload`, error)
```
```diff
@@ -491,7 +493,7 @@
       // completed and therefore can ensure the the offset is the size.
       // AWS S3 returns NoSuchUpload, but other implementations, such as DigitalOcean
       // Spaces, can also return NoSuchKey.
-      if (error.code === 'NoSuchUpload' || error.code === 'NoSuchKey') {
+      if (error.Code === 'NoSuchUpload' || error.Code === 'NoSuchKey') {
         return new Upload({
           id,
           ...this.cache.get(id)?.file,
```
```diff
@@ -530,30 +532,26 @@
     try {
       const {'upload-id': uploadId} = await this.getMetadata(id)
       if (uploadId) {
-        await this.client
-          .abortMultipartUpload({
-            Bucket: this.bucket,
-            Key: id,
-            UploadId: uploadId,
-          })
-          .promise()
+        await this.client.abortMultipartUpload({
+          Bucket: this.bucket,
+          Key: id,
+          UploadId: uploadId,
+        })
       }
     } catch (error) {
-      if (error?.code && ['NotFound', 'NoSuchKey', 'NoSuchUpload'].includes(error.code)) {
+      if (error?.Code && ['NotFound', 'NoSuchKey', 'NoSuchUpload'].includes(error.Code)) {
         log('remove: No file found.', error)
         throw ERRORS.FILE_NOT_FOUND
       }
       throw error
     }
 
-    await this.client
-      .deleteObjects({
-        Bucket: this.bucket,
-        Delete: {
-          Objects: [{Key: id}, {Key: `${id}.info`}],
-        },
-      })
-      .promise()
+    await this.client.deleteObjects({
+      Bucket: this.bucket,
+      Delete: {
+        Objects: [{Key: id}, {Key: `${id}.info`}],
+      },
+    })
 
     this.clearCache(id)
   }
```

packages/s3-store/package.json

Lines changed: 1 addition & 1 deletion
```diff
@@ -21,7 +21,7 @@
     "test": "mocha test.ts --timeout 40000 --exit --extension ts --require ts-node/register"
   },
   "dependencies": {
-    "aws-sdk": "^2.1368.0",
+    "@aws-sdk/client-s3": "^3.369.0",
     "debug": "^4.3.3"
   },
   "devDependencies": {
```
