Commit b1c07bc

@tus/s3-store: Change private modifier into protected (#698)
1 parent: 7182c7b

2 files changed: 30 additions, 25 deletions

.changeset/cyan-hornets-repair.md (5 additions, 0 deletions)

@@ -0,0 +1,5 @@
+---
+"@tus/s3-store": minor
+---
+
+Change private modifier to protected

packages/s3-store/src/index.ts (25 additions, 25 deletions)

@@ -24,7 +24,7 @@ import path from 'node:path'
 
 const log = debug('tus-node-server:stores:s3store')
 
-type Options = {
+export type Options = {
   // The preferred part size for parts send to S3. Can not be lower than 5MiB or more than 5GiB.
   // The server calculates the optimal part size, which takes this size into account,
   // but may increase it to not exceed the S3 10K parts limit.
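Note: with `Options` exported, consumers can name the store's configuration type in their own code. A minimal sketch, assuming `src/index.ts` is the package entry so the type is importable from `@tus/s3-store`; the `makeStore` helper is hypothetical:

import {S3Store, type Options} from '@tus/s3-store'

// Hypothetical factory: the config parameter can now be annotated with
// the exported `Options` type instead of being inlined or inferred.
function makeStore(options: Options): S3Store {
  return new S3Store(options)
}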
@@ -82,13 +82,13 @@ function calcOffsetFromParts(parts?: Array<AWS.Part>)
 // For each incoming PATCH request (a call to `write`), a new part is uploaded
 // to S3.
 export class S3Store extends DataStore {
-  private bucket: string
-  private cache: KvStore<MetadataValue>
-  private client: S3
-  private preferredPartSize: number
-  private expirationPeriodInMilliseconds = 0
-  private useTags = true
-  private partUploadSemaphore: Semaphore
+  protected bucket: string
+  protected cache: KvStore<MetadataValue>
+  protected client: S3
+  protected preferredPartSize: number
+  protected expirationPeriodInMilliseconds = 0
+  protected useTags = true
+  protected partUploadSemaphore: Semaphore
   public maxMultipartParts = 10_000 as const
   public minPartSize = 5_242_880 as const // 5MiB
   public maxUploadSize = 5_497_558_138_880 as const // 5TiB
@@ -131,7 +131,7 @@ export class S3Store extends DataStore {
    * on the S3 object's `Metadata` field, so that only a `headObject`
    * is necessary to retrieve the data.
    */
-  private async saveMetadata(upload: Upload, uploadId: string) {
+  protected async saveMetadata(upload: Upload, uploadId: string) {
     log(`[${upload.id}] saving metadata`)
     await this.client.putObject({
       Bucket: this.bucket,
@@ -146,7 +146,7 @@
     log(`[${upload.id}] metadata file saved`)
   }
 
-  private async completeMetadata(upload: Upload) {
+  protected async completeMetadata(upload: Upload) {
     if (!this.shouldUseExpirationTags()) {
       return
     }
@@ -169,7 +169,7 @@
    * There's a small and simple caching mechanism to avoid multiple
    * HTTP calls to S3.
    */
-  private async getMetadata(id: string): Promise<MetadataValue> {
+  protected async getMetadata(id: string): Promise<MetadataValue> {
     const cached = await this.cache.get(id)
     if (cached) {
       return cached
@@ -196,11 +196,11 @@
     return metadata
   }
 
-  private infoKey(id: string) {
+  protected infoKey(id: string) {
     return `${id}.info`
   }
 
-  private partKey(id: string, isIncomplete = false) {
+  protected partKey(id: string, isIncomplete = false) {
     if (isIncomplete) {
       id += '.part'
     }
@@ -212,7 +212,7 @@
     return id
   }
 
-  private async uploadPart(
+  protected async uploadPart(
     metadata: MetadataValue,
     readStream: fs.ReadStream | Readable,
     partNumber: number
@@ -228,7 +228,7 @@
     return data.ETag as string
   }
 
-  private async uploadIncompletePart(
+  protected async uploadIncompletePart(
     id: string,
     readStream: fs.ReadStream | Readable
   ): Promise<string> {
@@ -242,7 +242,7 @@
     return data.ETag as string
   }
 
-  private async downloadIncompletePart(id: string) {
+  protected async downloadIncompletePart(id: string) {
     const incompletePart = await this.getIncompletePart(id)
 
     if (!incompletePart) {
@@ -301,7 +301,7 @@
     }
   }
 
-  private async getIncompletePart(id: string): Promise<Readable | undefined> {
+  protected async getIncompletePart(id: string): Promise<Readable | undefined> {
     try {
       const data = await this.client.getObject({
         Bucket: this.bucket,
@@ -317,7 +317,7 @@
     }
   }
 
-  private async getIncompletePartSize(id: string): Promise<number | undefined> {
+  protected async getIncompletePartSize(id: string): Promise<number | undefined> {
     try {
       const data = await this.client.headObject({
         Bucket: this.bucket,
@@ -332,7 +332,7 @@
     }
   }
 
-  private async deleteIncompletePart(id: string): Promise<void> {
+  protected async deleteIncompletePart(id: string): Promise<void> {
     await this.client.deleteObject({
       Bucket: this.bucket,
       Key: this.partKey(id, true),
@@ -342,7 +342,7 @@
   /**
    * Uploads a stream to s3 using multiple parts
   */
-  private async uploadParts(
+  protected async uploadParts(
     metadata: MetadataValue,
     readStream: stream.Readable,
     currentPartNumber: number,
@@ -429,7 +429,7 @@
    * Completes a multipart upload on S3.
    * This is where S3 concatenates all the uploaded parts.
    */
-  private async finishMultipartUpload(metadata: MetadataValue, parts: Array<AWS.Part>) {
+  protected async finishMultipartUpload(metadata: MetadataValue, parts: Array<AWS.Part>) {
     const response = await this.client.completeMultipartUpload({
       Bucket: this.bucket,
       Key: metadata.file.id,
@@ -450,7 +450,7 @@
    * Gets the number of complete parts/chunks already uploaded to S3.
    * Retrieves only consecutive parts.
    */
-  private async retrieveParts(
+  protected async retrieveParts(
     id: string,
     partNumberMarker?: string
   ): Promise<Array<AWS.Part>> {
@@ -483,12 +483,12 @@
   /**
    * Removes cached data for a given file.
   */
-  private async clearCache(id: string) {
+  protected async clearCache(id: string) {
     log(`[${id}] removing cached data`)
     await this.cache.delete(id)
   }
 
-  private calcOptimalPartSize(size?: number): number {
+  protected calcOptimalPartSize(size?: number): number {
     // When upload size is not know we assume largest possible value (`maxUploadSize`)
     if (size === undefined) {
       size = this.maxUploadSize
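Note: because `calcOptimalPartSize` is now `protected`, a subclass can swap in its own part-size policy. A sketch under stated assumptions; `FixedPartS3Store` is illustrative and not part of this codebase:

import {S3Store} from '@tus/s3-store'

// Illustrative subclass that pins parts at 64 MiB for predictable memory use.
class FixedPartS3Store extends S3Store {
  protected calcOptimalPartSize(size?: number): number {
    const fixed = 64 * 1024 * 1024 // 64 MiB, above the 5 MiB S3 minimum
    // Defer to the parent heuristic when a fixed size would exceed
    // S3's 10,000-part limit for this upload.
    if (size !== undefined && size / fixed > this.maxMultipartParts) {
      return super.calcOptimalPartSize(size)
    }
    return fixed
  }
}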
@@ -776,7 +776,7 @@
     return deleted
   }
 
-  private async uniqueTmpFileName(template: string): Promise<string> {
+  protected async uniqueTmpFileName(template: string): Promise<string> {
     let tries = 0
     const maxTries = 10
 
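Note: the widened visibility also applies to the fields (`client`, `bucket`, `cache`, and so on), so subclasses can reuse the store's own S3 client. A minimal sketch; `TaggedS3Store` and `tagUpload` are hypothetical, and `putObjectTagging` is a standard AWS SDK v3 S3 operation:

import {S3Store} from '@tus/s3-store'

// Hypothetical subclass reusing the store's S3 client and bucket, which
// were unreachable from subclasses while they were `private`.
class TaggedS3Store extends S3Store {
  async tagUpload(id: string): Promise<void> {
    await this.client.putObjectTagging({
      Bucket: this.bucket,
      Key: id,
      Tagging: {TagSet: [{Key: 'reviewed', Value: 'true'}]},
    })
  }
}

Making members `protected` turns them into API surface that semver has to honor, which is consistent with the changeset marking this release as minor rather than patch.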