diff --git a/.changeset/plenty-actors-compare.md b/.changeset/plenty-actors-compare.md
new file mode 100644
index 00000000..0ae73bf5
--- /dev/null
+++ b/.changeset/plenty-actors-compare.md
@@ -0,0 +1,5 @@
+---
+"@tus/s3-store": patch
+---
+
+Fix increment for part numbers
diff --git a/packages/s3-store/src/index.ts b/packages/s3-store/src/index.ts
index e366ebbc..640348dc 100644
--- a/packages/s3-store/src/index.ts
+++ b/packages/s3-store/src/index.ts
@@ -367,8 +367,8 @@ export class S3Store extends DataStore {
       .on('chunkFinished', ({path, size: partSize}) => {
         pendingChunkFilepath = null

-        const partNumber = currentPartNumber + 1
         const acquiredPermit = permit
+        const partNumber = currentPartNumber++

         offset += partSize

diff --git a/packages/s3-store/test/index.ts b/packages/s3-store/test/index.ts
index 66c4b29b..d33d1886 100644
--- a/packages/s3-store/test/index.ts
+++ b/packages/s3-store/test/index.ts
@@ -11,6 +11,15 @@ import {Upload} from '@tus/utils'
 const fixturesPath = path.resolve('../', '../', 'test', 'fixtures')
 const storePath = path.resolve('../', '../', 'test', 'output', 's3-store')

+const s3ClientConfig = {
+  bucket: process.env.AWS_BUCKET as string,
+  credentials: {
+    accessKeyId: process.env.AWS_ACCESS_KEY_ID as string,
+    secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY as string,
+  },
+  region: process.env.AWS_REGION,
+}
+
 describe('S3DataStore', () => {
   before(function () {
     this.testFileSize = 960_244
@@ -21,14 +30,7 @@
   beforeEach(function () {
     this.datastore = new S3Store({
       partSize: 8 * 1024 * 1024, // Each uploaded part will have ~8MiB,
-      s3ClientConfig: {
-        bucket: process.env.AWS_BUCKET as string,
-        credentials: {
-          accessKeyId: process.env.AWS_ACCESS_KEY_ID as string,
-          secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY as string,
-        },
-        region: process.env.AWS_REGION,
-      },
+      s3ClientConfig,
     })
   })

@@ -196,6 +198,50 @@
     assert.equal(offset, incompleteSize)
   })

+  it('should use strictly sequential part numbers when uploading multiple chunks', async () => {
+    const store = new S3Store({
+      partSize: 5 * 1024 * 1024,
+      maxConcurrentPartUploads: 1,
+      s3ClientConfig,
+    })
+
+    // @ts-expect-error private method
+    const uploadPartSpy = sinon.spy(store, 'uploadPart')
+
+    const size = 15 * 1024 * 1024
+    const upload = new Upload({
+      id: shared.testId('increment-bug'),
+      size: size,
+      offset: 0,
+    })
+
+    await store.create(upload)
+
+    // Write all 15 MB in a single call (S3Store will internally chunk to ~3 parts):
+    const offset = await store.write(Readable.from(Buffer.alloc(size)), upload.id, 0)
+
+    assert.equal(offset, size)
+
+    const finalUpload = await store.getUpload(upload.id)
+    assert.equal(finalUpload.offset, size, 'getUpload offset should match total size')
+
+    const partNumbers = uploadPartSpy.getCalls().map((call) => call.args[2])
+
+    for (let i = 0; i < partNumbers.length; i++) {
+      if (i === 0) {
+        assert.equal(partNumbers[i], 1, 'First part number must be 1')
+      } else {
+        const prev = partNumbers[i - 1]
+        const curr = partNumbers[i]
+        assert.equal(
+          curr,
+          prev + 1,
+          `Part numbers should increment by 1. Found jump from ${prev} to ${curr}`
+        )
+      }
+    }
+  })
+
 shared.shouldHaveStoreMethods()
 shared.shouldCreateUploads()
 shared.shouldRemoveUploads() // Termination extension
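
The one-line change in `src/index.ts` is the heart of the fix: `currentPartNumber + 1` reads the counter without ever advancing it, while `currentPartNumber++` returns the current value and then increments, so successive chunks get 1, 2, 3, … This matters because S3 multipart uploads identify parts by `PartNumber` (valid range 1 to 10,000), and uploading a part with an already-used number overwrites the earlier part rather than appending. Below is a minimal standalone sketch of the difference, assuming (as the hunk and the new regression test suggest) that nothing else advanced the counter between chunks; the helper names are hypothetical and this is not the store's actual code:

```ts
// Sketch: how the two expressions assign part numbers to successive chunks.

// Before the fix: the counter is read but never advanced, so every chunk
// reuses the same part number and overwrites the previous part in the
// S3 multipart upload.
function partNumbersBefore(chunkCount: number): number[] {
  const currentPartNumber = 1
  const assigned: number[] = []
  for (let i = 0; i < chunkCount; i++) {
    assigned.push(currentPartNumber + 1) // always 2
  }
  return assigned
}

// After the fix: post-increment returns the current value, then advances
// the counter, yielding the strictly sequential 1, 2, 3, … that the new
// test asserts.
function partNumbersAfter(chunkCount: number): number[] {
  let currentPartNumber = 1
  const assigned: number[] = []
  for (let i = 0; i < chunkCount; i++) {
    assigned.push(currentPartNumber++)
  }
  return assigned
}

console.log(partNumbersBefore(3)) // [2, 2, 2] -> parts collide
console.log(partNumbersAfter(3))  // [1, 2, 3] -> strictly sequential
```

The regression test pins `maxConcurrentPartUploads: 1` so that parts finish in order and the spy on the private `uploadPart` method observes part numbers deterministically; with 15 MiB written at a 5 MiB `partSize`, the expected sequence is exactly 1, 2, 3.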