Skip to content

Commit 3e36f4d

Browse files
authored
@tus/s3-store: Fix invalid chunk size for uploads with deferred length (#504)
* @tus/s3-store: Fix invalid chunk size for uploads with deferred length * fix documentation regarding part size limits * fix units to use binary prefix (KiB, MiB, GiB, TiB)
1 parent 0f0e5d3 commit 3e36f4d

File tree

4 files changed

+22
-10
lines changed

4 files changed

+22
-10
lines changed

demo/server.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ const stores = {
2929
assert.ok(process.env.AWS_REGION, 'environment variable `AWS_REGION` must be set')
3030

3131
return new S3Store({
32-
partSize: 8 * 1024 * 1024, // each uploaded part will have ~8MB,
32+
partSize: 8 * 1024 * 1024, // each uploaded part will have ~8MiB,
3333
s3ClientConfig: {
3434
bucket: process.env.AWS_BUCKET,
3535
accessKeyId: process.env.AWS_ACCESS_KEY_ID,

packages/s3-store/README.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ const {Server} = require('@tus/server')
3333
const {S3Store} = require('@tus/s3-store')
3434

3535
const s3Store = new S3Store({
36-
partSize: 8 * 1024 * 1024, // Each uploaded part will have ~8MB,
36+
partSize: 8 * 1024 * 1024, // Each uploaded part will have ~8MiB,
3737
s3ClientConfig: {
3838
bucket: process.env.AWS_BUCKET,
3939
region: process.env.AWS_REGION,
@@ -61,7 +61,7 @@ The bucket name.
6161

6262
#### `options.partSize`
6363

64-
The preferred part size for parts send to S3. Can not be lower than 5MB or more than 500MB.
64+
The preferred part size for parts sent to S3. Cannot be lower than 5MiB or more than 5GiB.
6565
The server calculates the optimal part size, which takes this size into account,
6666
but may increase it to not exceed the S3 10K parts limit.
6767

packages/s3-store/index.ts

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ function calcOffsetFromParts(parts?: Array<AWS.Part>) {
1818
}
1919

2020
type Options = {
21-
// The preferred part size for parts send to S3. Can not be lower than 5MB or more than 500MB.
21+
// The preferred part size for parts sent to S3. Cannot be lower than 5MiB or more than 5GiB.
2222
// The server calculates the optimal part size, which takes this size into account,
2323
// but may increase it to not exceed the S3 10K parts limit.
2424
partSize?: number
@@ -70,7 +70,8 @@ export class S3Store extends DataStore {
7070
private client: S3
7171
private preferredPartSize: number
7272
public maxMultipartParts = 10_000 as const
73-
public minPartSize = 5_242_880 as const // 5MB
73+
public minPartSize = 5_242_880 as const // 5MiB
74+
public maxUploadSize = 5_497_558_138_880 as const // 5TiB
7475

7576
constructor(options: Options) {
7677
super()
@@ -262,7 +263,7 @@ export class S3Store extends DataStore {
262263
currentPartNumber: number,
263264
offset: number
264265
): Promise<number> {
265-
const size = metadata.file.size as number
266+
const size = metadata.file.size
266267
const promises: Promise<void>[] = []
267268
let pendingChunkFilepath: string | null = null
268269
let bytesUploaded = 0
@@ -422,7 +423,12 @@ export class S3Store extends DataStore {
422423
this.cache.delete(id)
423424
}
424425

425-
private calcOptimalPartSize(size: number): number {
426+
private calcOptimalPartSize(size?: number): number {
427+
// When the upload size is not known we assume the largest possible value (`maxUploadSize`)
428+
if (size === undefined) {
429+
size = this.maxUploadSize
430+
}
431+
426432
let optimalPartSize: number
427433

428434
// When upload is smaller or equal to PreferredPartSize, we upload in just one part.

packages/s3-store/test.ts

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ describe('S3DataStore', function () {
2121
})
2222
beforeEach(function () {
2323
this.datastore = new S3Store({
24-
partSize: 8 * 1024 * 1024, // Each uploaded part will have ~8MB,
24+
partSize: 8 * 1024 * 1024, // Each uploaded part will have ~8MiB,
2525
s3ClientConfig: {
2626
bucket: process.env.AWS_BUCKET as string,
2727
credentials: {
@@ -33,6 +33,12 @@ describe('S3DataStore', function () {
3333
})
3434
})
3535

36+
it('calculated part size for deferred length should be finite', async function () {
37+
const store = this.datastore
38+
39+
assert.strictEqual(Number.isFinite(store.calcOptimalPartSize(undefined)), true)
40+
})
41+
3642
it('should correctly prepend a buffer to a file', async function () {
3743
const p = path.resolve(fixturesPath, 'foo.txt')
3844
await fs.writeFile(p, 'world!')
@@ -109,8 +115,8 @@ describe('S3DataStore', function () {
109115

110116
it('upload as multipart upload when incomplete part grows beyond minimal part size', async function () {
111117
const store = this.datastore
112-
const size = 10 * 1024 * 1024 // 10MB
113-
const incompleteSize = 2 * 1024 * 1024 // 2MB
118+
const size = 10 * 1024 * 1024 // 10MiB
119+
const incompleteSize = 2 * 1024 * 1024 // 2MiB
114120
const getIncompletePart = sinon.spy(store, 'getIncompletePart')
115121
const uploadIncompletePart = sinon.spy(store, 'uploadIncompletePart')
116122
const uploadPart = sinon.spy(store, 'uploadPart')

0 commit comments

Comments
 (0)