Commit 4002c69

Fix test
1 parent 2b38291 commit 4002c69

File tree

packages/s3-store/src/index.ts
packages/s3-store/test/index.ts

2 files changed: +10 additions, -23 deletions

packages/s3-store/src/index.ts

Lines changed: 2 additions & 1 deletion

@@ -368,6 +368,7 @@ export class S3Store extends DataStore {
         pendingChunkFilepath = null

         const acquiredPermit = permit
+        const partNumber = currentPartNumber++

         offset += partSize

@@ -382,7 +383,7 @@ export class S3Store extends DataStore {
         readable.on('error', reject)

         if (partSize >= this.minPartSize || isFinalPart) {
-          await this.uploadPart(metadata, readable, currentPartNumber)
+          await this.uploadPart(metadata, readable, partNumber)
         } else {
           await this.uploadIncompletePart(metadata.file.id, readable)
         }
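
Why the change matters: the added line snapshots the shared `currentPartNumber` counter synchronously, at the moment the chunk is scheduled, instead of reading it later inside the (possibly concurrent) upload call. The sketch below is illustrative only, not the library source; `send` and `sleep` are stand-ins for the real upload call and for the suspension points (permit acquisition, stream draining) that sit between scheduling and upload.

let currentPartNumber = 1

const sleep = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms))

// Racy shape: the counter is read again after a suspension point, by which
// time another concurrently scheduled chunk may already have advanced it,
// so parts can repeat or skip numbers.
async function uploadRacy(send: (n: number) => Promise<void>): Promise<void> {
  currentPartNumber++
  await sleep(0) // stand-in for acquiring a permit / draining the stream
  await send(currentPartNumber)
}

// Fixed shape (as in the hunk above): snapshot before any await, pinning
// each chunk to exactly one part number.
async function uploadSequential(send: (n: number) => Promise<void>): Promise<void> {
  const partNumber = currentPartNumber++
  await send(partNumber)
}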

packages/s3-store/test/index.ts

Lines changed: 8 additions & 22 deletions

@@ -200,44 +200,30 @@ describe('S3DataStore', () => {

   it('should use strictly sequential part numbers when uploading multiple chunks', async () => {
     const store = new S3Store({
-      partSize: 1 * 1024 * 1024,
+      partSize: 5 * 1024 * 1024,
       maxConcurrentPartUploads: 1,
       s3ClientConfig,
     })

     // @ts-expect-error private method
     const uploadPartSpy = sinon.spy(store, 'uploadPart')

-    // 5.5 MiB total size => at 1 MiB partSize, we will get at least 6 chunks
-    const TOTAL_SIZE = 5.5 * 1024 * 1024
+    const size = 15 * 1024 * 1024
     const upload = new Upload({
-      id: shared.testId('double-increment-bug'),
-      size: TOTAL_SIZE,
+      id: shared.testId('increment-bug'),
+      size: size,
       offset: 0,
     })

     await store.create(upload)

-    let offset = await store.write(
-      Readable.from(Buffer.alloc(3 * 1024 * 1024)),
-      upload.id,
-      upload.offset
-    )
-    assert.equal(offset, 3 * 1024 * 1024, 'Offset should be 3 MiB now')
+    // Write all 15 MB in a single call (S3Store will internally chunk to ~3 parts):
+    const offset = await store.write(Readable.from(Buffer.alloc(size)), upload.id, 0)

-    offset = await store.write(
-      Readable.from(Buffer.alloc(2.5 * 1024 * 1024)),
-      upload.id,
-      offset
-    )
-    assert.equal(offset, TOTAL_SIZE, 'Offset should match total size')
+    assert.equal(offset, size)

     const finalUpload = await store.getUpload(upload.id)
-    assert.equal(
-      finalUpload.offset,
-      TOTAL_SIZE,
-      'getUpload offset should match total size'
-    )
+    assert.equal(finalUpload.offset, size, 'getUpload offset should match total size')

     const partNumbers = uploadPartSpy.getCalls().map((call) => call.args[2])
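
The hunk ends at the spy inspection, so the assertion on `partNumbers` falls outside the visible diff. Purely as an illustration of what such a check could look like (hypothetical, not part of this commit):

// Hypothetical continuation, not shown in the diff: with maxConcurrentPartUploads: 1
// and 15 MiB written at a 5 MiB partSize, the spied part numbers should run 1..N.
const expected = partNumbers.map((_, i) => i + 1) // 1, 2, 3 for three parts
assert.deepStrictEqual(partNumbers, expected, 'part numbers should be strictly sequential')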
