
Commit 0bab40f

@tus/server: Split to multiple chunks on large incoming buffer (#506)
* @tus/server: Split to multiple chunks on large incoming buffer
* format code
1 parent 3e36f4d commit 0bab40f

File tree

2 files changed: +33 -6 lines


packages/server/src/models/StreamSplitter.ts

Lines changed: 7 additions & 6 deletions
@@ -44,18 +44,19 @@ export class StreamSplitter extends stream.Writable {
       await this._newChunk()
     }
 
-    const overflow = this.currentChunkSize + chunk.length - this.chunkSize
+    let overflow = this.currentChunkSize + chunk.length - this.chunkSize
+
     // The current chunk will be more than our defined part size if we would
     // write all of it to disk.
-    if (overflow > 0) {
+    while (overflow > 0) {
       // Only write to disk up to our defined part size.
-      await this._writeChunk(chunk.slice(0, chunk.length - overflow))
+      await this._writeChunk(chunk.subarray(0, chunk.length - overflow))
       await this._finishChunk()
+
       // We still have some overflow left, so we write it to a new chunk.
       await this._newChunk()
-      await this._writeChunk(chunk.slice(chunk.length - overflow, chunk.length))
-      callback(null)
-      return
+      chunk = chunk.subarray(chunk.length - overflow, chunk.length)
+      overflow = this.currentChunkSize + chunk.length - this.chunkSize
     }
 
     // The chunk is smaller than our defined part size so we can just write it to disk.
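
The fix turns the single overflow check into a loop: the removed `if` could peel off at most one part-sized piece before returning, so a single incoming buffer larger than twice `chunkSize` left everything past the first split in one oversized chunk. Below is a minimal standalone sketch of the loop's arithmetic, assuming a freshly started chunk has a `currentChunkSize` of 0; the function name and parameters are for illustration only, not the @tus/server API.

// Sketch: split one incoming buffer into part-sized pieces plus a remainder,
// mirroring the while loop above. The real class tracks currentChunkSize
// internally and writes pieces to part files instead of collecting them.
function splitBuffer(chunk: Buffer, chunkSize: number, currentChunkSize = 0): Buffer[] {
  const parts: Buffer[] = []
  let overflow = currentChunkSize + chunk.length - chunkSize
  while (overflow > 0) {
    // Take only enough bytes to fill the current part...
    parts.push(chunk.subarray(0, chunk.length - overflow))
    // ...then carry the rest forward into a fresh, empty part.
    chunk = chunk.subarray(chunk.length - overflow, chunk.length)
    currentChunkSize = 0
    overflow = currentChunkSize + chunk.length - chunkSize
  }
  if (chunk.length > 0) parts.push(chunk) // remainder smaller than a full part
  return parts
}

// Example: splitBuffer(Buffer.alloc(7 * 1024), 1024).length === 7

Switching `slice` to `subarray` also sidesteps the deprecated `Buffer#slice` (DEP0158); both return views over the same memory, so the loop allocates no copies.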

packages/server/test/StreamSplitter.test.ts

Lines changed: 26 additions & 0 deletions
@@ -4,6 +4,7 @@ import stream from 'node:stream/promises'
 import {strict as assert} from 'node:assert'
 
 import {StreamSplitter} from '../src/models'
+import {Readable} from 'node:stream'
 
 const fileSize = 20_971_520
 
@@ -25,4 +26,29 @@ describe('StreamSplitter', () => {
     await stream.pipeline(readStream, splitterStream)
     assert.equal(offset, fileSize)
   })
+
+  it('should split to multiple chunks when single buffer exceeds chunk size', async () => {
+    const optimalChunkSize = 1024
+    const expectedChunks = 7
+
+    const readStream = Readable.from([Buffer.alloc(expectedChunks * optimalChunkSize)])
+
+    let chunksStarted = 0
+    let chunksFinished = 0
+    const splitterStream = new StreamSplitter({
+      chunkSize: optimalChunkSize,
+      directory: os.tmpdir(),
+    })
+      .on('chunkStarted', () => {
+        chunksStarted++
+      })
+      .on('chunkFinished', () => {
+        chunksFinished++
+      })
+
+    await stream.pipeline(readStream, splitterStream)
+
+    assert.equal(chunksStarted, expectedChunks)
+    assert.equal(chunksFinished, expectedChunks)
+  })
 })
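
This test exercises exactly the regression: `Readable.from([Buffer.alloc(...)])` delivers the whole 7 KiB payload to the splitter in a single write, and with the removed `if` the splitter would have emitted only two chunks for it (one part-sized, one oversized). A usage sketch along the same lines, assuming the constructor options and event names used in the test; the import path matches the test file and would need adjusting elsewhere.

import os from 'node:os'
import stream from 'node:stream/promises'
import {Readable} from 'node:stream'
import {StreamSplitter} from '../src/models' // path as used in the test

async function splitIntoParts(payload: Buffer, chunkSize: number): Promise<number> {
  let finished = 0
  const splitter = new StreamSplitter({
    chunkSize,              // max bytes per part file
    directory: os.tmpdir(), // where part files are written
  }).on('chunkFinished', () => {
    // Fires once per completed part, even when a single incoming
    // buffer spans several parts (the behavior this commit fixes).
    finished++
  })
  await stream.pipeline(Readable.from([payload]), splitter)
  return finished
}

// splitIntoParts(Buffer.alloc(7 * 1024), 1024) resolves to 7.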
