@@ -200,44 +200,30 @@ describe('S3DataStore', () => {
200200
201201 it ( 'should use strictly sequential part numbers when uploading multiple chunks' , async ( ) => {
202202 const store = new S3Store ( {
203- partSize : 1 * 1024 * 1024 ,
203+ partSize : 5 * 1024 * 1024 ,
204204 maxConcurrentPartUploads : 1 ,
205205 s3ClientConfig,
206206 } )
207207
208208 // @ts -expect-error private method
209209 const uploadPartSpy = sinon . spy ( store , 'uploadPart' )
210210
211- // 5.5 MiB total size => at 1 MiB partSize, we will get at least 6 chunks
212- const TOTAL_SIZE = 5.5 * 1024 * 1024
211+ const size = 15 * 1024 * 1024
213212 const upload = new Upload ( {
214- id : shared . testId ( 'double- increment-bug' ) ,
215- size : TOTAL_SIZE ,
213+ id : shared . testId ( 'increment-bug' ) ,
214+ size : size ,
216215 offset : 0 ,
217216 } )
218217
219218 await store . create ( upload )
220219
221- let offset = await store . write (
222- Readable . from ( Buffer . alloc ( 3 * 1024 * 1024 ) ) ,
223- upload . id ,
224- upload . offset
225- )
226- assert . equal ( offset , 3 * 1024 * 1024 , 'Offset should be 3 MiB now' )
 220+ // Write all 15 MiB in a single call (S3Store will internally chunk to ~3 parts):
221+ const offset = await store . write ( Readable . from ( Buffer . alloc ( size ) ) , upload . id , 0 )
227222
228- offset = await store . write (
229- Readable . from ( Buffer . alloc ( 2.5 * 1024 * 1024 ) ) ,
230- upload . id ,
231- offset
232- )
233- assert . equal ( offset , TOTAL_SIZE , 'Offset should match total size' )
223+ assert . equal ( offset , size )
234224
235225 const finalUpload = await store . getUpload ( upload . id )
236- assert . equal (
237- finalUpload . offset ,
238- TOTAL_SIZE ,
239- 'getUpload offset should match total size'
240- )
226+ assert . equal ( finalUpload . offset , size , 'getUpload offset should match total size' )
241227
242228 const partNumbers = uploadPartSpy . getCalls ( ) . map ( ( call ) => call . args [ 2 ] )
243229
0 commit comments