diff --git a/internal/mock/BUILD.bazel b/internal/mock/BUILD.bazel index bf2061c7..18927d52 100644 --- a/internal/mock/BUILD.bazel +++ b/internal/mock/BUILD.bazel @@ -185,6 +185,7 @@ gomock( out = "filesystem_filepool.go", interfaces = [ "FilePool", + "ReaderAt", ], library = "//pkg/filesystem/pool", mockgen_model_library = "@org_uber_go_mock//mockgen/model", diff --git a/pkg/builder/file_pool_stats_build_executor.go b/pkg/builder/file_pool_stats_build_executor.go index fd9a9d17..978608ac 100644 --- a/pkg/builder/file_pool_stats_build_executor.go +++ b/pkg/builder/file_pool_stats_build_executor.go @@ -57,8 +57,8 @@ type statsCollectingFilePool struct { totalFiles uint64 } -func (fp *statsCollectingFilePool) NewFile() (filesystem.FileReadWriter, error) { - f, err := fp.base.NewFile() +func (fp *statsCollectingFilePool) NewFile(sparseReaderAt pool.SparseReaderAt, size uint64) (filesystem.FileReadWriter, error) { + f, err := fp.base.NewFile(sparseReaderAt, size) if err != nil { return nil, err } diff --git a/pkg/builder/file_pool_stats_build_executor_test.go b/pkg/builder/file_pool_stats_build_executor_test.go index f64e2470..d0c78391 100644 --- a/pkg/builder/file_pool_stats_build_executor_test.go +++ b/pkg/builder/file_pool_stats_build_executor_test.go @@ -44,12 +44,12 @@ func TestFilePoolStatsBuildExecutorExample(t *testing.T) { digest.MustNewFunction("hello", remoteexecution.DigestFunction_MD5), request, gomock.Any()).DoAndReturn(func(ctx context.Context, filePool pool.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { - f, err := filePool.NewFile() + f, err := filePool.NewFile(nil, 0) require.NoError(t, err) require.NoError(t, f.Truncate(5)) require.NoError(t, f.Close()) - f, err = filePool.NewFile() + f, err = filePool.NewFile(nil, 0) require.NoError(t, err) n, err := 
f.WriteAt([]byte("Hello"), 100) require.Equal(t, 5, n) @@ -75,10 +75,10 @@ func TestFilePoolStatsBuildExecutorExample(t *testing.T) { file1 := mock.NewMockFileReadWriter(ctrl) file2 := mock.NewMockFileReadWriter(ctrl) - filePool.EXPECT().NewFile().Return(file1, nil) + filePool.EXPECT().NewFile(nil, uint64(0)).Return(file1, nil) file1.EXPECT().Truncate(int64(5)).Return(nil) file1.EXPECT().Close().Return(nil) - filePool.EXPECT().NewFile().Return(file2, nil) + filePool.EXPECT().NewFile(nil, uint64(0)).Return(file2, nil) file2.EXPECT().WriteAt([]byte("Hello"), int64(100)).Return(5, nil) file2.EXPECT().ReadAt(gomock.Any(), int64(98)).DoAndReturn(func(p []byte, offset int64) (int, error) { copy(p, []byte("\x00\x00Hello\x00\x00\x00")) diff --git a/pkg/filesystem/pool/BUILD.bazel b/pkg/filesystem/pool/BUILD.bazel index d15f1d50..e3cdcc79 100644 --- a/pkg/filesystem/pool/BUILD.bazel +++ b/pkg/filesystem/pool/BUILD.bazel @@ -9,6 +9,9 @@ go_library( "file_pool.go", "metrics_file_pool.go", "quota_enforcing_file_pool.go", + "simple_sparse_reader_at.go", + "sparse_reader_at.go", + "truncatable_sparse_reader_at.go", ], importpath = "github.com/buildbarn/bb-remote-execution/pkg/filesystem/pool", visibility = ["//visibility:public"], @@ -30,6 +33,8 @@ go_test( "block_device_backed_file_pool_test.go", "empty_file_pool_test.go", "quota_enforcing_file_pool_test.go", + "simple_sparse_reader_at_test.go", + "truncatable_sparse_reader_at_test.go", ], deps = [ ":pool", diff --git a/pkg/filesystem/pool/block_device_backed_file_pool.go b/pkg/filesystem/pool/block_device_backed_file_pool.go index 5f50c9e0..a4c18fd3 100644 --- a/pkg/filesystem/pool/block_device_backed_file_pool.go +++ b/pkg/filesystem/pool/block_device_backed_file_pool.go @@ -3,6 +3,7 @@ package pool import ( "fmt" "io" + "strings" re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-storage/pkg/blockdevice" @@ -33,16 +34,31 @@ func NewBlockDeviceBackedFilePool(blockDevice 
blockdevice.BlockDevice, sectorAll } } -func (fp *blockDeviceBackedFilePool) NewFile() (filesystem.FileReadWriter, error) { - return &blockDeviceBackedFile{ - fp: fp, - }, nil +func (fp *blockDeviceBackedFilePool) NewFile(sparseReaderAt SparseReaderAt, initialSize uint64) (filesystem.FileReadWriter, error) { + var err error + if sparseReaderAt == nil { + if initialSize != 0 { + return nil, status.Errorf(codes.InvalidArgument, "initial size must be zero when sparseReaderAt is nil") + } + if sparseReaderAt, err = NewSimpleSparseReaderAt(strings.NewReader(""), nil, 0); err != nil { + return nil, status.Errorf(codes.Internal, "failed to create empty SparseReaderAt: %v", err) + } + } + fr := &blockDeviceBackedFile{ + fp: fp, + underlying: NewTruncatableSparseReaderAt(sparseReaderAt, int64(initialSize)), + } + if err = fr.Truncate(int64(initialSize)); err != nil { + return nil, status.Errorf(codes.Internal, "failed to truncate file to initial size: %v", err) + } + return fr, nil } type blockDeviceBackedFile struct { - fp *blockDeviceBackedFilePool - sizeBytes uint64 - sectors []uint32 + fp *blockDeviceBackedFilePool + underlying TruncatableSparseReaderAt + sizeBytes uint64 + sectors []uint32 } func (f *blockDeviceBackedFile) Close() error { @@ -115,7 +131,7 @@ func (f *blockDeviceBackedFile) limitBufferToSectorBoundary(p []byte, sectorCoun return p } -func (f *blockDeviceBackedFile) GetNextRegionOffset(off int64, regionType filesystem.RegionType) (int64, error) { +func (f *blockDeviceBackedFile) getNextRegionOffsetForOverlay(off int64, regionType filesystem.RegionType) (int64, error) { // Short circuit calls that are out of bounds. 
if off < 0 { return 0, status.Errorf(codes.InvalidArgument, "Negative seek offset: %d", off) @@ -165,6 +181,77 @@ func (f *blockDeviceBackedFile) GetNextRegionOffset(off int64, regionType filesy } } +func (f *blockDeviceBackedFile) GetNextRegionOffset(off int64, regionType filesystem.RegionType) (int64, error) { + // Short circuit calls that are out of bounds. + if off < 0 { + return 0, status.Errorf(codes.InvalidArgument, "Negative seek offset: %d", off) + } + if uint64(off) >= f.sizeBytes { + return 0, io.EOF + } + + // Data is represented by the existence of a written sector in + // either the overlay or the underlying file. Holes are represented + // by the absence of a written sector in the overlay _and_ a hole in + // the underlying file. + // + // For data this is the lowest valued offset of the two candidates. + // For holes it's the first position which both sources agree upon + // are holes. + switch regionType { + case filesystem.Data: + data1, err := f.underlying.GetNextRegionOffset(off, filesystem.Data) + if err == io.EOF { + // No more data in the underlying file. Return the result + // from the overlay. + return f.getNextRegionOffsetForOverlay(off, filesystem.Data) + } + if err != nil { + return data1, status.Errorf(codes.Internal, "unexpected error while searching for data in underlying file: %v", err) + } + data2, err := f.getNextRegionOffsetForOverlay(off, filesystem.Data) + if err == io.EOF { + // No more data in the overlay, return the data from the + // underlying file. + return data1, nil + } + if err != nil { + return data2, status.Errorf(codes.Internal, "unexpected error while searching for data in overlay file: %v", err) + } + if data1 < data2 { + return data1, nil + } + return data2, nil + case filesystem.Hole: + for { + // Since we have already ruled out that we are past the EOF + // boundary no calls to GetNextRegionOffset should be + // capable of returning holes. 
+ hole1, err := f.underlying.GetNextRegionOffset(off, filesystem.Hole) + if err != nil { + return hole1, status.Errorf(codes.Internal, "unexpected error while searching for hole in underlying file: %v", err) + } + hole2, err := f.getNextRegionOffsetForOverlay(off, filesystem.Hole) + if err != nil { + return hole2, status.Errorf(codes.Internal, "unexpected error while searching for hole in overlay file: %v", err) + } + if hole1 == hole2 { + // Both sources agree that it's a hole. + return hole1, nil + } + if hole1 == int64(f.sizeBytes) || hole2 == int64(f.sizeBytes) { + // The only possible hole is the implicit hole at the + // end of the file. + return int64(f.sizeBytes), nil + } + // Continue searching at the next possible offset. + off = max(hole1, hole2) + } + default: + panic("Unknown region type") + } +} + // readFromSectors performs a single read against the block device. It // attempts to read as much data into the output buffer as is possible // in a single read operation. If the file is fragmented, multiple reads @@ -172,23 +259,17 @@ func (f *blockDeviceBackedFile) GetNextRegionOffset(off int64, regionType filesy func (f *blockDeviceBackedFile) readFromSectors(p []byte, sectorIndex, lastSectorIndex, offsetWithinSector int) (int, error) { if sectorIndex >= len(f.sectors) { // Attempted to read from a hole located at the - // end of the file. Fill up all of the remaining - // space with zero bytes. - for i := 0; i < len(p); i++ { - p[i] = 0 - } - return len(p), nil + // end of the file. Delegate to ReadLayer. + offset := f.fp.sectorSizeBytes*sectorIndex + offsetWithinSector + return f.underlying.ReadAt(p, int64(offset)) } sector, sectorsToRead := f.getSectorsContiguous(sectorIndex, lastSectorIndex) p = f.limitBufferToSectorBoundary(p, sectorsToRead, offsetWithinSector) if sector == 0 { // Attempted to read from a sparse region of the file. - // Fill in zero bytes. 
- for i := 0; i < len(p); i++ { - p[i] = 0 - } - return len(p), nil + offset := f.fp.sectorSizeBytes*sectorIndex + offsetWithinSector + return f.underlying.ReadAt(p, int64(offset)) } // Attempted to read from a region of the file that contains @@ -267,6 +348,9 @@ func (f *blockDeviceBackedFile) Truncate(size int64) error { if size < 0 { return status.Errorf(codes.InvalidArgument, "Negative truncation size: %d", size) } + if err := f.underlying.Truncate(size); err != nil { + return status.Errorf(codes.Internal, "truncating the underlying file failed: %v", err) + } sectorIndex := int(size / int64(f.fp.sectorSizeBytes)) offsetWithinSector := int(size % int64(f.fp.sectorSizeBytes)) @@ -299,7 +383,7 @@ func (f *blockDeviceBackedFile) Truncate(size int64) error { // writeToNewSectors is used to write data into new sectors. This // function is called when holes in a sparse file are filled up or when // data is appended to the end of a file. -func (f *blockDeviceBackedFile) writeToNewSectors(p []byte, offsetWithinSector int) (int, uint32, int, error) { +func (f *blockDeviceBackedFile) writeToNewSectors(p []byte, fromSector, offsetWithinSector int) (int, uint32, int, error) { // Allocate space to store the data. sectorsToAllocate := int((uint64(offsetWithinSector) + uint64(len(p)) + uint64(f.fp.sectorSizeBytes) - 1) / uint64(f.fp.sectorSizeBytes)) firstSector, sectorsAllocated, err := f.fp.sectorAllocator.AllocateContiguous(sectorsToAllocate) @@ -314,10 +398,15 @@ func (f *blockDeviceBackedFile) writeToNewSectors(p []byte, offsetWithinSector i nWritten := len(p) // Write the first sector separately when we need to introduce - // leading zero padding. + // leading read layer padding. 
sector := firstSector if offsetWithinSector > 0 { buf := make([]byte, f.fp.sectorSizeBytes) + logicalOffset := fromSector * f.fp.sectorSizeBytes + if _, err := f.underlying.ReadAt(buf[:offsetWithinSector], int64(logicalOffset)); err != nil { + f.fp.sectorAllocator.FreeContiguous(firstSector, sectorsAllocated) + return 0, 0, 0, err + } nWritten := copy(buf[offsetWithinSector:], p) if _, err := f.fp.blockDevice.WriteAt(buf, f.toDeviceOffset(sector, 0)); err != nil { f.fp.sectorAllocator.FreeContiguous(firstSector, sectorsAllocated) @@ -340,9 +429,14 @@ func (f *blockDeviceBackedFile) writeToNewSectors(p []byte, offsetWithinSector i } // Write the last sector separately when we need to introduce - // trailing zero padding. + // trailing read layer padding. if len(p) > 0 { buf := make([]byte, f.fp.sectorSizeBytes) + logicalOffset := uint32(len(p)) + (sector-firstSector)*uint32(f.fp.sectorSizeBytes) + if _, err := f.underlying.ReadAt(buf[len(p):], int64(logicalOffset)); err != nil { + f.fp.sectorAllocator.FreeContiguous(firstSector, sectorsAllocated) + return 0, 0, 0, err + } copy(buf, p) if _, err := f.fp.blockDevice.WriteAt(buf, f.toDeviceOffset(sector, 0)); err != nil { f.fp.sectorAllocator.FreeContiguous(firstSector, sectorsAllocated) @@ -375,7 +469,7 @@ func (f *blockDeviceBackedFile) writeToSectors(p []byte, sectorIndex, lastSector // Attempted to write past the end-of-file or within a // hole located at the end of a sparse file. Allocate // space and grow the file. - bytesWritten, firstSector, sectorsAllocated, err := f.writeToNewSectors(p, offsetWithinSector) + bytesWritten, firstSector, sectorsAllocated, err := f.writeToNewSectors(p, sectorIndex, offsetWithinSector) if err != nil { return 0, err } @@ -389,7 +483,7 @@ func (f *blockDeviceBackedFile) writeToSectors(p []byte, sectorIndex, lastSector if sector == 0 { // Attempted to write to a hole within a sparse file. // Allocate space and insert sectors into the file. 
- bytesWritten, firstSector, sectorsAllocated, err := f.writeToNewSectors(p, offsetWithinSector) + bytesWritten, firstSector, sectorsAllocated, err := f.writeToNewSectors(p, sectorIndex, offsetWithinSector) if err != nil { return 0, err } @@ -409,6 +503,13 @@ func (f *blockDeviceBackedFile) WriteAt(p []byte, off int64) (int, error) { if len(p) == 0 { return 0, nil } + // Truncate the file to a larger size if needed to accommodate the + // write. + if f.sizeBytes < uint64(off)+uint64(len(p)) { + if err := f.Truncate(off + int64(len(p))); err != nil { + return 0, err + } + } // As the file may be stored on disk non-contiguously or may be // a sparse file with holes, the write may need to be decomposed diff --git a/pkg/filesystem/pool/block_device_backed_file_pool_test.go b/pkg/filesystem/pool/block_device_backed_file_pool_test.go index ac96c0ea..91e2b3db 100644 --- a/pkg/filesystem/pool/block_device_backed_file_pool_test.go +++ b/pkg/filesystem/pool/block_device_backed_file_pool_test.go @@ -3,6 +3,7 @@ package pool_test import ( "io" "math" + "strings" "testing" "github.com/buildbarn/bb-remote-execution/internal/mock" @@ -26,7 +27,7 @@ func TestBlockDeviceBackedFilePool(t *testing.T) { t.Run("ReadEmptyFile", func(t *testing.T) { // Test that reads on an empty file work as expected. - f, err := pool.NewFile() + f, err := pool.NewFile(nil, 0) require.NoError(t, err) var p [10]byte @@ -54,7 +55,7 @@ func TestBlockDeviceBackedFilePool(t *testing.T) { }) t.Run("Truncate", func(t *testing.T) { - f, err := pool.NewFile() + f, err := pool.NewFile(nil, 0) require.NoError(t, err) // Invalid size. 
@@ -102,7 +103,7 @@ func TestBlockDeviceBackedFilePool(t *testing.T) { }) t.Run("WritesAndReadOnSingleSector", func(t *testing.T) { - f, err := pool.NewFile() + f, err := pool.NewFile(nil, 0) require.NoError(t, err) // The initial write to a sector should cause the full @@ -139,7 +140,7 @@ func TestBlockDeviceBackedFilePool(t *testing.T) { }) t.Run("WriteFragmentation", func(t *testing.T) { - f, err := pool.NewFile() + f, err := pool.NewFile(nil, 0) require.NoError(t, err) // Simulate the case where 137 bytes of data needs to be @@ -172,7 +173,7 @@ func TestBlockDeviceBackedFilePool(t *testing.T) { }) t.Run("WriteSectorAllocatorFailure", func(t *testing.T) { - f, err := pool.NewFile() + f, err := pool.NewFile(nil, 0) require.NoError(t, err) // Failure to allocate sectors should cause the write to @@ -192,7 +193,7 @@ func TestBlockDeviceBackedFilePool(t *testing.T) { }) t.Run("WriteIOFailure", func(t *testing.T) { - f, err := pool.NewFile() + f, err := pool.NewFile(nil, 0) require.NoError(t, err) // Write failures to freshly allocator sectors should @@ -215,7 +216,7 @@ func TestBlockDeviceBackedFilePool(t *testing.T) { t.Run("GetNextRegionOffset", func(t *testing.T) { // Test the behavior on empty files. 
- f, err := pool.NewFile() + f, err := pool.NewFile(nil, 0) require.NoError(t, err) _, err = f.GetNextRegionOffset(-1, filesystem.Data) @@ -313,10 +314,172 @@ func TestBlockDeviceBackedFilePool(t *testing.T) { }) t.Run("WriteAt", func(t *testing.T) { - f, err := pool.NewFile() + f, err := pool.NewFile(nil, 0) require.NoError(t, err) _, err = f.WriteAt([]byte{0}, -1) testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Negative write offset: -1"), err) }) } + +func TestFilePoolWithSparseUnderlyingFile(t *testing.T) { + ctrl := gomock.NewController(t) + blockDevice := mock.NewMockBlockDevice(ctrl) + sectorAllocator := mock.NewMockSectorAllocator(ctrl) + filepool := pool.NewBlockDeviceBackedFilePool(blockDevice, sectorAllocator, 16) + + t.Run("ReadFromHoleReturnsBackingData", func(t *testing.T) { + sparseReaderAt, err := pool.NewSimpleSparseReaderAt(strings.NewReader("HelloWorld"), nil, 10) + require.NoError(t, err) + file, err := filepool.NewFile(sparseReaderAt, 10) + require.NoError(t, err) + + buf := make([]byte, 10) + n, err := file.ReadAt(buf, 0) + require.Equal(t, err, io.EOF) + require.Equal(t, 10, n) + require.Equal(t, []byte("HelloWorld"), buf) + + require.NoError(t, file.Close()) + }) + + t.Run("CannotReadFromHoleBeyondBacking", func(t *testing.T) { + sparseReaderAt, err := pool.NewSimpleSparseReaderAt(strings.NewReader("abc"), nil, 3) + require.NoError(t, err) + file, err := filepool.NewFile(sparseReaderAt, 3) + require.NoError(t, err) + + buf := make([]byte, 6) + n, err := file.ReadAt(buf, 0) + require.Equal(t, err, io.EOF) + require.Equal(t, 3, n) + require.Equal(t, []byte("abc\x00\x00\x00"), buf) + + require.NoError(t, file.Close()) + }) + + t.Run("TruncatePropagatesToBackingLayer", func(t *testing.T) { + sparseReaderAt, err := pool.NewSimpleSparseReaderAt(strings.NewReader("abcdefhij"), nil, 10) + require.NoError(t, err) + file, err := filepool.NewFile(sparseReaderAt, 10) + require.NoError(t, err) + + require.NoError(t, file.Truncate(4)) 
+ buf := make([]byte, 6) + n, err := file.ReadAt(buf, 0) + require.Equal(t, 4, n) + require.Equal(t, io.EOF, err) + require.Equal(t, []byte("abcd\x00\x00"), buf) + + require.NoError(t, file.Close()) + }) + + t.Run("TruncateHasNoGhosting", func(t *testing.T) { + sparseReaderAt, err := pool.NewSimpleSparseReaderAt(strings.NewReader("abcdefhij"), nil, 10) + require.NoError(t, err) + file, err := filepool.NewFile(sparseReaderAt, 10) + require.NoError(t, err) + // shrunk to 4 bytes + require.NoError(t, file.Truncate(4)) + buf := make([]byte, 6) + n, err := file.ReadAt(buf, 0) + require.Equal(t, 4, n) + require.Equal(t, io.EOF, err) + require.Equal(t, []byte("abcd\x00\x00"), buf) + // grow to 10 bytes + require.NoError(t, file.Truncate(10)) + n, err = file.ReadAt(buf, 0) + require.Equal(t, 6, n) + require.NoError(t, err) + require.Equal(t, []byte("abcd\x00\x00"), buf) + + require.NoError(t, file.Close()) + }) + + t.Run("WriteOverridesBackingLayer", func(t *testing.T) { + // Use 4 byte sectors for clarity. + filepool := pool.NewBlockDeviceBackedFilePool(blockDevice, sectorAllocator, 4) + // Each letter covers a full sector. + sparseReaderAt, err := pool.NewSimpleSparseReaderAt(strings.NewReader("AAAABBBBCCCC"), nil, 12) + file, err := filepool.NewFile(sparseReaderAt, 12) + require.NoError(t, err) + + // Write ZZZ at offset 2, this spans the first two sectors which + // requires us to bring them into our sparse file. We will allocate + // sector 10 and 11 of our block device for this (address 36 and 40). + sectorAllocator.EXPECT().AllocateContiguous(2).Return(uint32(10), 2, nil) + blockDevice.EXPECT().WriteAt([]byte("AAZZ"), int64(36)).Return(4, nil) + blockDevice.EXPECT().WriteAt([]byte("ZBBB"), int64(40)).Return(4, nil) + n, err := file.WriteAt([]byte("ZZZ"), 2) + require.NoError(t, err) + require.Equal(t, 3, n) + + // This data would then be expected to be read back to us. 
+ blockDevice.EXPECT().ReadAt(gomock.Len(8), int64(36)).DoAndReturn( + func(p []byte, offset int64) (int, error) { + copy(p, []byte("AAZZZBBB")) + return 8, nil + }, + ) + + buf := make([]byte, 12) + n, err = file.ReadAt(buf, 0) + require.Equal(t, io.EOF, err) + require.Equal(t, 12, n) + require.Equal(t, []byte("AAZZZBBBCCCC"), buf) + + sectorAllocator.EXPECT().FreeList([]uint32{10, 11}) + require.NoError(t, file.Close()) + }) + + t.Run("EffectivelyDense", func(t *testing.T) { + // Use 2 byte sectors. + filepool := pool.NewBlockDeviceBackedFilePool(blockDevice, sectorAllocator, 2) + // Use the sparse string 'xxBBxxDD' for the underlying. + sparseReader, err := pool.NewSimpleSparseReaderAt(strings.NewReader("\x00\x00BB\x00\x00DD"), []pool.Range{{Off: 0, Len: 2}, {Off: 4, Len: 2}}, 8) + require.NoError(t, err) + file, err := filepool.NewFile(sparseReader, 8) + require.NoError(t, err) + + // Write exactly AA and DD to the overlay. + sectorAllocator.EXPECT().AllocateContiguous(1).Return(uint32(10), 1, nil) + sectorAllocator.EXPECT().AllocateContiguous(1).Return(uint32(11), 1, nil) + blockDevice.EXPECT().WriteAt([]byte("AA"), int64(18)).Return(2, nil) + blockDevice.EXPECT().WriteAt([]byte("CC"), int64(20)).Return(2, nil) + n, err := file.WriteAt([]byte("AA"), 0) + require.NoError(t, err) + require.Equal(t, n, 2) + n, err = file.WriteAt([]byte("CC"), 4) + require.NoError(t, err) + require.Equal(t, n, 2) + + // Read it back. 
+ blockDevice.EXPECT().ReadAt(gomock.Len(2), int64(18)).DoAndReturn( + func(p []byte, offset int64) (int, error) { + copy(p, []byte("AA")) + return 2, nil + }, + ) + blockDevice.EXPECT().ReadAt(gomock.Len(2), int64(20)).DoAndReturn( + func(p []byte, offset int64) (int, error) { + copy(p, []byte("CC")) + return 2, nil + }, + ) + + buf := make([]byte, 8) + n, err = file.ReadAt(buf, 0) + require.Equal(t, 8, n) + require.Equal(t, []byte("AABBCCDD"), buf) + + // Verify sparsity + for i := int64(0); i < 8; i++ { + o, err := file.GetNextRegionOffset(i, filesystem.Data) + require.NoError(t, err) + require.Equal(t, i, o) + o, err = file.GetNextRegionOffset(i, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, int64(8), o) + } + }) +} diff --git a/pkg/filesystem/pool/empty_file_pool.go b/pkg/filesystem/pool/empty_file_pool.go index cc788f54..ca01591c 100644 --- a/pkg/filesystem/pool/empty_file_pool.go +++ b/pkg/filesystem/pool/empty_file_pool.go @@ -9,7 +9,7 @@ import ( type emptyFilePool struct{} -func (fp emptyFilePool) NewFile() (filesystem.FileReadWriter, error) { +func (fp emptyFilePool) NewFile(SparseReaderAt, uint64) (filesystem.FileReadWriter, error) { return nil, status.Error(codes.ResourceExhausted, "Cannot create file in empty file pool") } diff --git a/pkg/filesystem/pool/empty_file_pool_test.go b/pkg/filesystem/pool/empty_file_pool_test.go index 3faca7bc..66e303ab 100644 --- a/pkg/filesystem/pool/empty_file_pool_test.go +++ b/pkg/filesystem/pool/empty_file_pool_test.go @@ -11,6 +11,6 @@ import ( ) func TestEmptyFilePool(t *testing.T) { - _, err := pool.EmptyFilePool.NewFile() + _, err := pool.EmptyFilePool.NewFile(nil, 0) require.Equal(t, err, status.Error(codes.ResourceExhausted, "Cannot create file in empty file pool")) } diff --git a/pkg/filesystem/pool/file_pool.go b/pkg/filesystem/pool/file_pool.go index 77c3a67b..071f1ce9 100644 --- a/pkg/filesystem/pool/file_pool.go +++ b/pkg/filesystem/pool/file_pool.go @@ -10,6 +10,12 @@ import ( // File 
handles returned by NewFile() are not thread-safe. Additional // locking needs to be done at higher levels to permit safe concurrent // access. +// +// If the sparseReaderAt parameter is not nil the file is created as if +// it had the initial content of the supplied SparseReaderAt. The size +// parameter must be less than or equal to the size of the underlying +// data. If the sparseReaderAt parameter is nil the file is created +// empty. type FilePool interface { - NewFile() (filesystem.FileReadWriter, error) + NewFile(sparseReaderAt SparseReaderAt, size uint64) (filesystem.FileReadWriter, error) } diff --git a/pkg/filesystem/pool/metrics_file_pool.go b/pkg/filesystem/pool/metrics_file_pool.go index ae22e8c6..b68098e2 100644 --- a/pkg/filesystem/pool/metrics_file_pool.go +++ b/pkg/filesystem/pool/metrics_file_pool.go @@ -43,8 +43,8 @@ func NewMetricsFilePool(base FilePool) FilePool { } } -func (fp *metricsFilePool) NewFile() (filesystem.FileReadWriter, error) { - f, err := fp.base.NewFile() +func (fp *metricsFilePool) NewFile(sparseReaderAt SparseReaderAt, size uint64) (filesystem.FileReadWriter, error) { + f, err := fp.base.NewFile(sparseReaderAt, size) if err != nil { return nil, err } diff --git a/pkg/filesystem/pool/quota_enforcing_file_pool.go b/pkg/filesystem/pool/quota_enforcing_file_pool.go index 95f4309a..68a27cc1 100644 --- a/pkg/filesystem/pool/quota_enforcing_file_pool.go +++ b/pkg/filesystem/pool/quota_enforcing_file_pool.go @@ -53,11 +53,11 @@ func NewQuotaEnforcingFilePool(base FilePool, maximumFileCount, maximumTotalSize return fp } -func (fp *quotaEnforcingFilePool) NewFile() (filesystem.FileReadWriter, error) { +func (fp *quotaEnforcingFilePool) NewFile(sparseReaderAt SparseReaderAt, size uint64) (filesystem.FileReadWriter, error) { if !fp.filesRemaining.allocate(1) { return nil, status.Error(codes.InvalidArgument, "File count quota reached") } - f, err := fp.base.NewFile() + f, err := fp.base.NewFile(sparseReaderAt, size) if err != nil { 
fp.filesRemaining.release(1) return nil, err diff --git a/pkg/filesystem/pool/quota_enforcing_file_pool_test.go b/pkg/filesystem/pool/quota_enforcing_file_pool_test.go index db414b96..e28c624b 100644 --- a/pkg/filesystem/pool/quota_enforcing_file_pool_test.go +++ b/pkg/filesystem/pool/quota_enforcing_file_pool_test.go @@ -25,12 +25,12 @@ func testRemainingQuota(t *testing.T, ctrl *gomock.Controller, underlyingPool *m files := make([]filesystem.FileReadWriter, filesRemaining) for i := 0; i < filesRemaining; i++ { underlyingFiles[i] = mock.NewMockFileReadWriter(ctrl) - underlyingPool.EXPECT().NewFile().Return(underlyingFiles[i], nil) + underlyingPool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFiles[i], nil) var err error - files[i], err = pool.NewFile() + files[i], err = pool.NewFile(nil, 0) require.NoError(t, err) } - _, err := pool.NewFile() + _, err := pool.NewFile(nil, 0) require.Equal(t, err, status.Error(codes.InvalidArgument, "File count quota reached")) for i := 0; i < filesRemaining; i++ { underlyingFiles[i].EXPECT().Close().Return(nil) @@ -40,8 +40,8 @@ func testRemainingQuota(t *testing.T, ctrl *gomock.Controller, underlyingPool *m // Check that the remaining amount of space is available by // allocating one file and truncating it to the exact size. underlyingFile := mock.NewMockFileReadWriter(ctrl) - underlyingPool.EXPECT().NewFile().Return(underlyingFile, nil) - f, err := pool.NewFile() + underlyingPool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFile, nil) + f, err := pool.NewFile(nil, 0) require.NoError(t, err) if bytesRemaining != 0 { underlyingFile.EXPECT().Truncate(bytesRemaining).Return(nil) @@ -62,15 +62,15 @@ func TestQuotaEnforcingFilePoolExample(t *testing.T) { // Failure to allocate a file from the underlying pool should // not affect the quota. 
- underlyingPool.EXPECT().NewFile().Return(nil, status.Error(codes.Internal, "I/O error")) - _, err := pool.NewFile() + underlyingPool.EXPECT().NewFile(nil, uint64(0)).Return(nil, status.Error(codes.Internal, "I/O error")) + _, err := pool.NewFile(nil, 0) require.Equal(t, err, status.Error(codes.Internal, "I/O error")) testRemainingQuota(t, ctrl, underlyingPool, pool, 10, 1000) // Successfully allocate a file. underlyingFile := mock.NewMockFileReadWriter(ctrl) - underlyingPool.EXPECT().NewFile().Return(underlyingFile, nil) - f, err := pool.NewFile() + underlyingPool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFile, nil) + f, err := pool.NewFile(nil, 0) require.NoError(t, err) testRemainingQuota(t, ctrl, underlyingPool, pool, 9, 1000) diff --git a/pkg/filesystem/pool/simple_sparse_reader_at.go b/pkg/filesystem/pool/simple_sparse_reader_at.go new file mode 100644 index 00000000..2b0615cb --- /dev/null +++ b/pkg/filesystem/pool/simple_sparse_reader_at.go @@ -0,0 +1,130 @@ +package pool + +import ( + "io" + "sort" + + "github.com/buildbarn/bb-storage/pkg/filesystem" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ReaderAt is type alias of io.ReaderAt used for mock generation. +type ReaderAt = io.ReaderAt + +// Range describes an offset interval. Used by SimpleSparseReaderAt to +// describe the holes in the io.ReaderAt. +type Range struct { + Off int64 + Len int64 +} + +// NewSimpleSparseReaderAt creates a SparseReaderAt from an io.ReaderAt +// and a list of holes. +// +// Note: This decorates io.ReaderAt with the necessary metadata to +// fulfill the SparseReaderAt interface. It doesn't perform the zeroing +// of reads in holes or padding of reads past the EOF boundary. It is +// assumed that the underlying io.ReaderAt already does that. 
+func NewSimpleSparseReaderAt(reader io.ReaderAt, holes []Range, sizeBytes int64) (SparseReaderAt, error) { + sort.Slice(holes, func(i, j int) bool { + return holes[i].Off < holes[j].Off + }) + for _, hole := range holes { + if hole.Len < 0 { + return nil, status.Errorf(codes.InvalidArgument, "invalid hole: %v", hole) + } + if hole.Off > sizeBytes { + return nil, status.Errorf(codes.InvalidArgument, "hole out of bounds: %v", hole) + } + } + // Explicitly add the implicit hole at the end of file to simplify + // the code. + holes = append(holes, Range{Off: sizeBytes, Len: 1}) + // Reduce to simplest representation by merging adjacent holes. + reducedHoles := make([]Range, 0, len(holes)) + prev := holes[0] + for i := 1; i < len(holes); i++ { + hole := holes[i] + prevEnd := prev.Off + prev.Len + // This hole starts before or is adjacent to the previous hole. + if hole.Off <= prevEnd { + end := hole.Off + hole.Len + // Extend the previous hole to the end of this hole. + if end > prevEnd { + prev.Len += end - prevEnd + } + continue + } + // Save it in the reduced set only if it is not a zero length + // hole. + if prev.Len > 0 { + reducedHoles = append(reducedHoles, prev) + } + prev = hole + } + reducedHoles = append(reducedHoles, prev) + return &sparseReaderAt{ + reader: reader, + holes: reducedHoles, + sizeBytes: sizeBytes, + }, nil +} + +type sparseReaderAt struct { + reader io.ReaderAt + holes []Range + sizeBytes int64 +} + +func (s *sparseReaderAt) ReadAt(p []byte, off int64) (int, error) { + n, err := s.reader.ReadAt(p, off) + return n, err +} + +func (s *sparseReaderAt) GetNextRegionOffset(offset int64, regionType filesystem.RegionType) (int64, error) { + // Find the offset of the first hole or data >= offset. Resolves it + // by binary searching for the two surrounding holes. Since we + // add the implicit hole at the end of file to the list of holes in + // the constructor we will always have at least one hole in the list. 
+ if offset < 0 { + return 0, status.Errorf(codes.InvalidArgument, "negative offset: %d", offset) + } + // Out of bounds. + if offset >= s.sizeBytes { + return 0, io.EOF + } + // Index of first hole with offset greater than the given offset. + nextIndex := sort.Search(len(s.holes), func(i int) bool { + return s.holes[i].Off > offset + }) + // Index of the last hole with offset less than or equal to the + // given offset. + prevIndex := nextIndex - 1 + switch regionType { + case filesystem.Hole: + if prevIndex == -1 { + return s.holes[0].Off, nil + } + prev := s.holes[prevIndex] + if prev.Off+prev.Len > offset { + return offset, nil + } + return s.holes[nextIndex].Off, nil + case filesystem.Data: + if prevIndex == -1 { + return offset, nil + } + prev := s.holes[prevIndex] + after := prev.Off + prev.Len + if after >= s.sizeBytes { + return 0, io.EOF + } + if after > offset { + return after, nil + } + return offset, nil + default: + return 0, status.Errorf(codes.InvalidArgument, "unknown region type: %v", regionType) + } +} diff --git a/pkg/filesystem/pool/simple_sparse_reader_at_test.go b/pkg/filesystem/pool/simple_sparse_reader_at_test.go new file mode 100644 index 00000000..82b3f206 --- /dev/null +++ b/pkg/filesystem/pool/simple_sparse_reader_at_test.go @@ -0,0 +1,77 @@ +package pool_test + +import ( + "io" + "testing" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/pool" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +func TestSimpleSparseReaderAt(t *testing.T) { + ctrl := gomock.NewController(t) + readerAt := mock.NewMockReaderAt(ctrl) + t.Run("TestGetNextRegionOffset", func(t *testing.T) { + // All permutations of holes in a 4 character string. Represented + // by the bitmask of holes (values 0b0000 to 0b1111). 
+ for i := range 1 << 4 { + mask := (^uint16(0) >> 4 << 4) | uint16(i) + isHole := func(index int) bool { + return mask&(1<= uint64(r.logicalSize) { + return 0, io.EOF + } + var success error + if end := off + int64(len(p)); end >= r.logicalSize { + success = io.EOF + p = p[:r.logicalSize-off] + } + + // Bytes to read from the underlying stream. + n1 := min(int64(len(p)), max(r.sizeBytes-off, 0)) + n2, err := r.readUnderlyingSuppressEOF(p[:n1], off) + if int64(n2) != n1 || err != nil { + return n2, err + } + for i := range len(p[n1:]) { + p[n1+int64(i)] = 0 + } + return len(p), success +} + +func (r *truncatableSparseReaderAt) GetNextRegionOffset(offset int64, regionType filesystem.RegionType) (int64, error) { + // For indexes within r.sizeBytes this interface has holes where the + // underlying interface has holes and data where the underlying + // interface has data. For indexes outside of r.sizeBytes this + // interface is all holes. + if offset < 0 { + return 0, status.Errorf(codes.InvalidArgument, "negative offset %d", offset) + } + if offset >= r.logicalSize { + return 0, io.EOF + } + switch regionType { + case filesystem.Data: + if offset < r.sizeBytes { + innerOffset, err := r.base.GetNextRegionOffset(offset, regionType) + if innerOffset < r.sizeBytes { + return innerOffset, err + } + } + return 0, io.EOF + case filesystem.Hole: + if offset < r.sizeBytes { + innerOffset, err := r.base.GetNextRegionOffset(offset, regionType) + if innerOffset < r.sizeBytes { + return innerOffset, err + } + } + return max(offset, r.sizeBytes), nil + default: + return 0, status.Errorf(codes.InvalidArgument, "unknown region type %d", regionType) + } +} diff --git a/pkg/filesystem/pool/truncatable_sparse_reader_at_test.go b/pkg/filesystem/pool/truncatable_sparse_reader_at_test.go new file mode 100644 index 00000000..a6cc0c8a --- /dev/null +++ b/pkg/filesystem/pool/truncatable_sparse_reader_at_test.go @@ -0,0 +1,200 @@ +package pool_test + +import ( + "io" + "strings" + "testing" 
+ + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/pool" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/stretchr/testify/require" +) + +func TestTruncatableSparseReaderAt_ReadAt(t *testing.T) { + text := "Hello World!" + underlyingSize := int64(len(text)) + sparseReader, err := pool.NewSimpleSparseReaderAt(strings.NewReader(text), nil, underlyingSize) + require.NoError(t, err) + + t.Run("ReadWithinBounds", func(t *testing.T) { + r := pool.NewTruncatableSparseReaderAt(sparseReader, underlyingSize) + + buf := make([]byte, 5) + n, err := r.ReadAt(buf, 0) + require.NoError(t, err) + require.Equal(t, 5, n) + require.Equal(t, []byte("Hello"), buf[:n]) + }) + + t.Run("ReadAcrossBoundary", func(t *testing.T) { + r := pool.NewTruncatableSparseReaderAt(sparseReader, underlyingSize) + + buf := make([]byte, 7) + n, err := r.ReadAt(buf, 6) + require.Equal(t, err, io.EOF) + require.Equal(t, 6, n) + require.Equal(t, []byte("World!\x00"), buf) + }) + + t.Run("TruncateHidesData", func(t *testing.T) { + r := pool.NewTruncatableSparseReaderAt(sparseReader, underlyingSize) + + require.NoError(t, r.Truncate(5)) + buf := make([]byte, 6) + n, err := r.ReadAt(buf, 0) + require.Equal(t, err, io.EOF) + require.Equal(t, 5, n) + require.Equal(t, []byte("Hello\x00"), buf) + }) + + t.Run("ShrinkThenGrow", func(t *testing.T) { + r := pool.NewTruncatableSparseReaderAt(sparseReader, underlyingSize) + + require.NoError(t, r.Truncate(2)) + buf := make([]byte, 3) + n, err := r.ReadAt(buf, 0) + require.Equal(t, err, io.EOF) + require.Equal(t, 2, n) + require.Equal(t, []byte("He\x00"), buf) + + require.NoError(t, r.Truncate(5)) + n, err = r.ReadAt(buf, 0) + require.NoError(t, err) + require.Equal(t, 3, n) + require.Equal(t, []byte("He\x00"), buf) + }) + + t.Run("EdgeCaseEmptyUnderlying", func(t *testing.T) { + sparseReader, err := pool.NewSimpleSparseReaderAt(strings.NewReader(""), nil, 0) + r := pool.NewTruncatableSparseReaderAt(sparseReader, 0) + require.NoError(t, err) + 
buf := make([]byte, 5) + n, err := r.ReadAt(buf, 0) + require.Equal(t, io.EOF, err) + require.Equal(t, 0, n) + require.NoError(t, r.Truncate(5)) + n, err = r.ReadAt(buf, 0) + require.Equal(t, io.EOF, err) + require.Equal(t, 5, n) + require.Equal(t, []byte("\x00\x00\x00\x00\x00"), buf) + }) +} + +func TestTruncatableSparseReaderAt_GetNextRegionOffset(t *testing.T) { + text := "Hell\x00\x00World!" + underlyingSize := int64(len(text)) + sparseReader, err := pool.NewSimpleSparseReaderAt(strings.NewReader(text), []pool.Range{{Off: 4, Len: 2}}, underlyingSize) + require.NoError(t, err) + + t.Run("Untruncated", func(t *testing.T) { + r := pool.NewTruncatableSparseReaderAt(sparseReader, underlyingSize) + var err error + var nextOffset int64 + // Holes should be at [4,5] and special eof hole at 12. + for i := int64(0); i < 4; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, nextOffset, int64(4)) + } + for i := int64(4); i < 6; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, nextOffset, i) + } + for i := int64(6); i < underlyingSize; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, nextOffset, underlyingSize) + } + nextOffset, err = r.GetNextRegionOffset(underlyingSize, filesystem.Hole) + require.Equal(t, err, io.EOF) + // Data should be in [0,3] and [6,11] + for i := int64(0); i < 4; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Data) + require.NoError(t, err) + require.Equal(t, nextOffset, i) + } + for i := int64(4); i < 6; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Data) + require.NoError(t, err) + require.Equal(t, nextOffset, int64(6)) + } + for i := int64(6); i < underlyingSize; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Data) + require.NoError(t, err) + require.Equal(t, nextOffset, i) + } + nextOffset, err = 
r.GetNextRegionOffset(underlyingSize, filesystem.Data) + require.Equal(t, err, io.EOF) + }) + + t.Run("TruncateToHole", func(t *testing.T) { + r := pool.NewTruncatableSparseReaderAt(sparseReader, underlyingSize) + var err error + var nextOffset int64 + err = r.Truncate(6) + require.NoError(t, err) + // Holes should be at [4,5]. + for i := int64(0); i < 4; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, nextOffset, int64(4)) + } + for i := int64(4); i < 6; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, nextOffset, i) + } + nextOffset, err = r.GetNextRegionOffset(6, filesystem.Hole) + require.Equal(t, err, io.EOF) + // Data should be at [0,3]. + for i := int64(0); i < 4; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Data) + require.NoError(t, err) + require.Equal(t, nextOffset, i) + } + for i := int64(4); i < 6; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Data) + require.Equal(t, io.EOF, err) + } + nextOffset, err = r.GetNextRegionOffset(6, filesystem.Data) + require.Equal(t, io.EOF, err) + }) + + t.Run("TruncateThenGrow", func(t *testing.T) { + r := pool.NewTruncatableSparseReaderAt(sparseReader, underlyingSize) + var err error + var nextOffset int64 + // Truncate down to 4, then grow back up to 12. Expect [4,11] to + // be holes. + err = r.Truncate(4) + require.NoError(t, err) + err = r.Truncate(12) + require.NoError(t, err) + // Holes should be in the region [4,11]. 
+ for i := int64(0); i < 4; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, nextOffset, int64(4)) + } + for i := int64(4); i < 12; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, nextOffset, i) + } + nextOffset, err = r.GetNextRegionOffset(12, filesystem.Hole) + require.Equal(t, err, io.EOF) + // Data should be in the region [0,3]. + for i := int64(0); i < 4; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Data) + require.NoError(t, err) + require.Equal(t, nextOffset, i) + } + for i := int64(4); i < 12; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Data) + require.Equal(t, err, io.EOF) + } + nextOffset, err = r.GetNextRegionOffset(12, filesystem.Hole) + require.Equal(t, err, io.EOF) + }) +} diff --git a/pkg/filesystem/virtual/pool_backed_file_allocator.go b/pkg/filesystem/virtual/pool_backed_file_allocator.go index a4a1572f..f1e30e74 100644 --- a/pkg/filesystem/virtual/pool_backed_file_allocator.go +++ b/pkg/filesystem/virtual/pool_backed_file_allocator.go @@ -71,7 +71,7 @@ func NewPoolBackedFileAllocator(pool pool.FilePool, errorLogger util.ErrorLogger } func (fa *poolBackedFileAllocator) NewFile(isExecutable bool, size uint64, shareAccess ShareMask) (LinkableLeaf, Status) { - file, err := fa.pool.NewFile() + file, err := fa.pool.NewFile(nil, 0) if err != nil { fa.errorLogger.Log(util.StatusWrapf(err, "Failed to create new file")) return nil, StatusErrIO diff --git a/pkg/filesystem/virtual/pool_backed_file_allocator_test.go b/pkg/filesystem/virtual/pool_backed_file_allocator_test.go index a14fbef3..ba559e29 100644 --- a/pkg/filesystem/virtual/pool_backed_file_allocator_test.go +++ b/pkg/filesystem/virtual/pool_backed_file_allocator_test.go @@ -32,7 +32,7 @@ func TestPoolBackedFileAllocatorGetBazelOutputServiceStat(t *testing.T) { // Create a file and initialize it with some contents. 
pool := mock.NewMockFilePool(ctrl) underlyingFile := mock.NewMockFileReadWriter(ctrl) - pool.EXPECT().NewFile().Return(underlyingFile, nil) + pool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFile, nil) errorLogger := mock.NewMockErrorLogger(ctrl) f, s := virtual.NewPoolBackedFileAllocator(pool, errorLogger). @@ -201,7 +201,7 @@ func TestPoolBackedFileAllocatorVirtualSeek(t *testing.T) { pool := mock.NewMockFilePool(ctrl) underlyingFile := mock.NewMockFileReadWriter(ctrl) - pool.EXPECT().NewFile().Return(underlyingFile, nil) + pool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFile, nil) errorLogger := mock.NewMockErrorLogger(ctrl) f, s := virtual.NewPoolBackedFileAllocator(pool, errorLogger). @@ -268,7 +268,7 @@ func TestPoolBackedFileAllocatorVirtualOpenSelfStaleAfterUnlink(t *testing.T) { pool := mock.NewMockFilePool(ctrl) underlyingFile := mock.NewMockFileReadWriter(ctrl) - pool.EXPECT().NewFile().Return(underlyingFile, nil) + pool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFile, nil) underlyingFile.EXPECT().Close() errorLogger := mock.NewMockErrorLogger(ctrl) @@ -293,7 +293,7 @@ func TestPoolBackedFileAllocatorVirtualOpenSelfStaleAfterClose(t *testing.T) { pool := mock.NewMockFilePool(ctrl) underlyingFile := mock.NewMockFileReadWriter(ctrl) - pool.EXPECT().NewFile().Return(underlyingFile, nil) + pool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFile, nil) underlyingFile.EXPECT().Close() errorLogger := mock.NewMockErrorLogger(ctrl) @@ -315,7 +315,7 @@ func TestPoolBackedFileAllocatorVirtualRead(t *testing.T) { pool := mock.NewMockFilePool(ctrl) underlyingFile := mock.NewMockFileReadWriter(ctrl) - pool.EXPECT().NewFile().Return(underlyingFile, nil) + pool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFile, nil) errorLogger := mock.NewMockErrorLogger(ctrl) f, s := virtual.NewPoolBackedFileAllocator(pool, errorLogger). 
@@ -390,7 +390,7 @@ func TestPoolBackedFileAllocatorFUSETruncateFailure(t *testing.T) { pool := mock.NewMockFilePool(ctrl) underlyingFile := mock.NewMockFileReadWriter(ctrl) - pool.EXPECT().NewFile().Return(underlyingFile, nil) + pool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFile, nil) underlyingFile.EXPECT().Truncate(int64(42)).Return(status.Error(codes.Unavailable, "Storage backends offline")) underlyingFile.EXPECT().Close() @@ -417,7 +417,7 @@ func TestPoolBackedFileAllocatorVirtualWriteFailure(t *testing.T) { pool := mock.NewMockFilePool(ctrl) underlyingFile := mock.NewMockFileReadWriter(ctrl) - pool.EXPECT().NewFile().Return(underlyingFile, nil) + pool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFile, nil) var p [10]byte underlyingFile.EXPECT().WriteAt(p[:], int64(42)).Return(0, status.Error(codes.Unavailable, "Storage backends offline")) underlyingFile.EXPECT().Close() @@ -440,7 +440,7 @@ func TestPoolBackedFileAllocatorUploadFile(t *testing.T) { // Create a file backed by a FilePool. pool := mock.NewMockFilePool(ctrl) underlyingFile := mock.NewMockFileReadWriter(ctrl) - pool.EXPECT().NewFile().Return(underlyingFile, nil) + pool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFile, nil) errorLogger := mock.NewMockErrorLogger(ctrl) f, s := virtual.NewPoolBackedFileAllocator(pool, errorLogger). @@ -610,7 +610,7 @@ func TestPoolBackedFileAllocatorVirtualClose(t *testing.T) { // Create a new file. pool := mock.NewMockFilePool(ctrl) underlyingFile := mock.NewMockFileReadWriter(ctrl) - pool.EXPECT().NewFile().Return(underlyingFile, nil) + pool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFile, nil) errorLogger := mock.NewMockErrorLogger(ctrl) f, s := virtual.NewPoolBackedFileAllocator(pool, errorLogger).