diff --git a/cmd/bb_noop_worker/BUILD.bazel b/cmd/bb_noop_worker/BUILD.bazel index c680bf1f..bf74d489 100644 --- a/cmd/bb_noop_worker/BUILD.bazel +++ b/cmd/bb_noop_worker/BUILD.bazel @@ -9,7 +9,7 @@ go_library( deps = [ "//pkg/blobstore", "//pkg/builder", - "//pkg/filesystem", + "//pkg/filesystem/filepool", "//pkg/proto/configuration/bb_noop_worker", "//pkg/proto/remoteworker", "@com_github_buildbarn_bb_storage//pkg/blobstore/configuration", diff --git a/cmd/bb_noop_worker/main.go b/cmd/bb_noop_worker/main.go index ef6bef3f..5bc50a69 100644 --- a/cmd/bb_noop_worker/main.go +++ b/cmd/bb_noop_worker/main.go @@ -7,7 +7,7 @@ import ( re_blobstore "github.com/buildbarn/bb-remote-execution/pkg/blobstore" "github.com/buildbarn/bb-remote-execution/pkg/builder" - re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/bb_noop_worker" "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" blobstore_configuration "github.com/buildbarn/bb-storage/pkg/blobstore/configuration" @@ -76,7 +76,7 @@ func main() { contentAddressableStorage, int(configuration.MaximumMessageSizeBytes), browserURL), - re_filesystem.EmptyFilePool, + filepool.EmptyFilePool, clock.SystemClock, configuration.WorkerId, instanceNamePrefix, diff --git a/cmd/bb_worker/BUILD.bazel b/cmd/bb_worker/BUILD.bazel index ba6bba7b..96d0ebdb 100644 --- a/cmd/bb_worker/BUILD.bazel +++ b/cmd/bb_worker/BUILD.bazel @@ -16,7 +16,7 @@ go_library( "//pkg/cas", "//pkg/cleaner", "//pkg/clock", - "//pkg/filesystem", + "//pkg/filesystem/filepool", "//pkg/filesystem/virtual", "//pkg/filesystem/virtual/configuration", "//pkg/proto/completedactionlogger", diff --git a/cmd/bb_worker/main.go b/cmd/bb_worker/main.go index 40754bb5..c7677657 100644 --- a/cmd/bb_worker/main.go +++ b/cmd/bb_worker/main.go @@ -18,7 +18,7 @@ import ( "github.com/buildbarn/bb-remote-execution/pkg/cas" 
"github.com/buildbarn/bb-remote-execution/pkg/cleaner" re_clock "github.com/buildbarn/bb-remote-execution/pkg/clock" - re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" virtual_configuration "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual/configuration" cal_proto "github.com/buildbarn/bb-remote-execution/pkg/proto/completedactionlogger" @@ -76,7 +76,7 @@ func main() { // currently only used by the virtual file system to store // output files of build actions. Going forward, this may be // used to store core dumps generated by build actions as well. - filePool, err := re_filesystem.NewFilePoolFromConfiguration(configuration.FilePool) + filePool, err := filepool.NewFilePoolFromConfiguration(configuration.FilePool) if err != nil { return util.StatusWrap(err, "Failed to create file pool") } @@ -218,7 +218,7 @@ func main() { virtualBuildDirectory = virtual.NewInMemoryPrepopulatedDirectory( virtual.NewHandleAllocatingFileAllocator( virtual.NewPoolBackedFileAllocator( - re_filesystem.EmptyFilePool, + filepool.EmptyFilePool, util.DefaultErrorLogger), handleAllocator), symlinkFactory, @@ -466,7 +466,7 @@ func main() { buildClient := builder.NewBuildClient( schedulerClient, buildExecutor, - re_filesystem.NewQuotaEnforcingFilePool( + filepool.NewQuotaEnforcingFilePool( filePool, runnerConfiguration.MaximumFilePoolFileCount, runnerConfiguration.MaximumFilePoolSizeBytes), diff --git a/internal/mock/BUILD.bazel b/internal/mock/BUILD.bazel index 3692a636..7f09d2ec 100644 --- a/internal/mock/BUILD.bazel +++ b/internal/mock/BUILD.bazel @@ -172,7 +172,6 @@ gomock( out = "filesystem_re.go", interfaces = [ "DirectoryOpener", - "FilePool", "SectorAllocator", ], library = "//pkg/filesystem", @@ -181,6 +180,19 @@ gomock( package = "mock", ) +gomock( + name = "filesystem_filepool", + out = "filesystem_filepool.go", + 
interfaces = [ + "FilePool", + "ReaderAt", + ], + library = "//pkg/filesystem/filepool", + mockgen_model_library = "@org_uber_go_mock//mockgen/model", + mockgen_tool = "@org_uber_go_mock//mockgen", + package = "mock", +) + gomock( name = "filesystem_virtual", out = "filesystem_virtual.go", @@ -411,6 +423,7 @@ go_library( ":completedactionlogger.go", ":filesystem.go", ":filesystem_access.go", + ":filesystem_filepool.go", ":filesystem_re.go", ":filesystem_virtual.go", ":grpc_go.go", @@ -445,6 +458,7 @@ go_library( "//pkg/cleaner", "//pkg/filesystem", "//pkg/filesystem/access", + "//pkg/filesystem/filepool", "//pkg/filesystem/virtual", "//pkg/proto/bazeloutputservice", "//pkg/proto/buildqueuestate", diff --git a/pkg/builder/BUILD.bazel b/pkg/builder/BUILD.bazel index a81db94a..543f4d3c 100644 --- a/pkg/builder/BUILD.bazel +++ b/pkg/builder/BUILD.bazel @@ -36,8 +36,8 @@ go_library( "//pkg/cas", "//pkg/cleaner", "//pkg/clock", - "//pkg/filesystem", "//pkg/filesystem/access", + "//pkg/filesystem/filepool", "//pkg/filesystem/virtual", "//pkg/proto/cas", "//pkg/proto/completedactionlogger", @@ -104,8 +104,8 @@ go_test( "//internal/mock", "//pkg/cleaner", "//pkg/clock", - "//pkg/filesystem", "//pkg/filesystem/access", + "//pkg/filesystem/filepool", "//pkg/proto/cas", "//pkg/proto/completedactionlogger", "//pkg/proto/remoteworker", diff --git a/pkg/builder/build_client.go b/pkg/builder/build_client.go index 6d50a20f..ed8bcfcf 100644 --- a/pkg/builder/build_client.go +++ b/pkg/builder/build_client.go @@ -6,7 +6,7 @@ import ( "time" remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" - "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" "github.com/buildbarn/bb-storage/pkg/clock" "github.com/buildbarn/bb-storage/pkg/digest" @@ -28,7 +28,7 @@ type BuildClient struct { // Constant fields. 
scheduler remoteworker.OperationQueueClient buildExecutor BuildExecutor - filePool filesystem.FilePool + filePool filepool.FilePool clock clock.Clock instanceNamePrefix digest.InstanceName instanceNamePatcher digest.InstanceNamePatcher @@ -45,7 +45,7 @@ type BuildClient struct { // NewBuildClient creates a new BuildClient instance that is set to the // initial state (i.e., being idle). -func NewBuildClient(scheduler remoteworker.OperationQueueClient, buildExecutor BuildExecutor, filePool filesystem.FilePool, clock clock.Clock, workerID map[string]string, instanceNamePrefix digest.InstanceName, platform *remoteexecution.Platform, sizeClass uint32) *BuildClient { +func NewBuildClient(scheduler remoteworker.OperationQueueClient, buildExecutor BuildExecutor, filePool filepool.FilePool, clock clock.Clock, workerID map[string]string, instanceNamePrefix digest.InstanceName, platform *remoteexecution.Platform, sizeClass uint32) *BuildClient { return &BuildClient{ scheduler: scheduler, buildExecutor: buildExecutor, diff --git a/pkg/builder/build_directory.go b/pkg/builder/build_directory.go index b92bbc5f..fe06e51e 100644 --- a/pkg/builder/build_directory.go +++ b/pkg/builder/build_directory.go @@ -4,8 +4,8 @@ import ( "context" "os" - re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-storage/pkg/digest" "github.com/buildbarn/bb-storage/pkg/filesystem" "github.com/buildbarn/bb-storage/pkg/filesystem/path" @@ -36,7 +36,7 @@ type BuildDirectory interface { // errors. Implementations of BuildDirectory are free to let // this be a no-op, with the disadvantage that they cannot apply // resource limits or provide rich I/O error messages. 
- InstallHooks(filePool re_filesystem.FilePool, errorLogger util.ErrorLogger) + InstallHooks(filePool filepool.FilePool, errorLogger util.ErrorLogger) // Recursively merges the contents of a Directory stored in the // Content Addressable Storage into a local directory. If this diff --git a/pkg/builder/build_executor.go b/pkg/builder/build_executor.go index 68879138..9ccb3c0b 100644 --- a/pkg/builder/build_executor.go +++ b/pkg/builder/build_executor.go @@ -4,8 +4,8 @@ import ( "context" remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" - "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" "github.com/buildbarn/bb-storage/pkg/digest" @@ -67,5 +67,5 @@ func GetResultAndGRPCCodeFromExecuteResponse(response *remoteexecution.ExecuteRe // requests and yield an execute response. 
type BuildExecutor interface { CheckReadiness(ctx context.Context) error - Execute(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse + Execute(ctx context.Context, filePool filepool.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse } diff --git a/pkg/builder/caching_build_executor.go b/pkg/builder/caching_build_executor.go index 29942bbe..0be5eea5 100644 --- a/pkg/builder/caching_build_executor.go +++ b/pkg/builder/caching_build_executor.go @@ -5,8 +5,8 @@ import ( "net/url" remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" - "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" cas_proto "github.com/buildbarn/bb-remote-execution/pkg/proto/cas" "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" re_util "github.com/buildbarn/bb-remote-execution/pkg/util" @@ -42,7 +42,7 @@ func NewCachingBuildExecutor(base BuildExecutor, contentAddressableStorage, acti } } -func (be *cachingBuildExecutor) Execute(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { +func (be *cachingBuildExecutor) Execute(ctx context.Context, filePool filepool.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- 
*remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { response := be.BuildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, executionStateUpdates) if actionDigest, err := digestFunction.NewDigestFromProto(request.ActionDigest); err != nil { attachErrorToExecuteResponse(response, util.StatusWrap(err, "Failed to extract digest for action")) diff --git a/pkg/builder/completed_action_logging_build_executor.go b/pkg/builder/completed_action_logging_build_executor.go index 76e0072a..46b67d1b 100644 --- a/pkg/builder/completed_action_logging_build_executor.go +++ b/pkg/builder/completed_action_logging_build_executor.go @@ -4,8 +4,8 @@ import ( "context" remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" - "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" cas_proto "github.com/buildbarn/bb-remote-execution/pkg/proto/cas" cal_proto "github.com/buildbarn/bb-remote-execution/pkg/proto/completedactionlogger" "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" @@ -34,7 +34,7 @@ func NewCompletedActionLoggingBuildExecutor(base BuildExecutor, uuidGenerator ut } } -func (be *completedActionLoggingBuildExecutor) Execute(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { +func (be *completedActionLoggingBuildExecutor) Execute(ctx context.Context, filePool filepool.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { response := be.BuildExecutor.Execute(ctx, filePool, monitor, 
digestFunction, request, executionStateUpdates) completedAction := &cal_proto.CompletedAction{ diff --git a/pkg/builder/cost_computing_build_executor.go b/pkg/builder/cost_computing_build_executor.go index 3ea8d993..0b1748e3 100644 --- a/pkg/builder/cost_computing_build_executor.go +++ b/pkg/builder/cost_computing_build_executor.go @@ -5,8 +5,8 @@ import ( remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" - re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" "github.com/buildbarn/bb-remote-execution/pkg/proto/resourceusage" "github.com/buildbarn/bb-storage/pkg/digest" @@ -31,7 +31,7 @@ func NewCostComputingBuildExecutor(base BuildExecutor, expensesPerSecond map[str } } -func (be *costComputingBuildExecutor) Execute(ctx context.Context, filePool re_filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { +func (be *costComputingBuildExecutor) Execute(ctx context.Context, filePool filepool.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { response := be.BuildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, executionStateUpdates) totalTime := response.Result.ExecutionMetadata.WorkerCompletedTimestamp.AsTime().Sub(response.Result.ExecutionMetadata.WorkerStartTimestamp.AsTime()).Seconds() diff --git a/pkg/builder/file_pool_stats_build_executor.go b/pkg/builder/file_pool_stats_build_executor.go index b7888bbc..2f044ff7 100644 --- 
a/pkg/builder/file_pool_stats_build_executor.go +++ b/pkg/builder/file_pool_stats_build_executor.go @@ -5,8 +5,8 @@ import ( "sync" remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" - re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" "github.com/buildbarn/bb-remote-execution/pkg/proto/resourceusage" "github.com/buildbarn/bb-storage/pkg/digest" @@ -30,7 +30,7 @@ func NewFilePoolStatsBuildExecutor(buildExecutor BuildExecutor) BuildExecutor { } } -func (be *filePoolStatsBuildExecutor) Execute(ctx context.Context, filePool re_filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { +func (be *filePoolStatsBuildExecutor) Execute(ctx context.Context, filePool filepool.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { fp := statsCollectingFilePool{base: filePool} response := be.BuildExecutor.Execute(ctx, &fp, monitor, digestFunction, request, executionStateUpdates) @@ -49,7 +49,7 @@ func (be *filePoolStatsBuildExecutor) Execute(ctx context.Context, filePool re_f // statsCollectingFilePool is a decorator for FilePool that measures the // number of files created and the number of operations performed. 
type statsCollectingFilePool struct { - base re_filesystem.FilePool + base filepool.FilePool lock sync.Mutex stats resourceusage.FilePoolResourceUsage @@ -57,8 +57,8 @@ type statsCollectingFilePool struct { totalFiles uint64 } -func (fp *statsCollectingFilePool) NewFile() (filesystem.FileReadWriter, error) { - f, err := fp.base.NewFile() +func (fp *statsCollectingFilePool) NewFile(sparseReaderAt filepool.SparseReaderAt, size uint64) (filesystem.FileReadWriter, error) { + f, err := fp.base.NewFile(sparseReaderAt, size) if err != nil { return nil, err } diff --git a/pkg/builder/file_pool_stats_build_executor_test.go b/pkg/builder/file_pool_stats_build_executor_test.go index b269c82a..a335d9f3 100644 --- a/pkg/builder/file_pool_stats_build_executor_test.go +++ b/pkg/builder/file_pool_stats_build_executor_test.go @@ -8,8 +8,8 @@ import ( remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" "github.com/buildbarn/bb-remote-execution/internal/mock" "github.com/buildbarn/bb-remote-execution/pkg/builder" - "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" "github.com/buildbarn/bb-remote-execution/pkg/proto/resourceusage" "github.com/buildbarn/bb-storage/pkg/digest" @@ -43,13 +43,13 @@ func TestFilePoolStatsBuildExecutorExample(t *testing.T) { monitor, digest.MustNewFunction("hello", remoteexecution.DigestFunction_MD5), request, - gomock.Any()).DoAndReturn(func(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { - f, err := filePool.NewFile() + gomock.Any()).DoAndReturn(func(ctx context.Context, filePool filepool.FilePool, 
monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + f, err := filePool.NewFile(nil, 0) require.NoError(t, err) require.NoError(t, f.Truncate(5)) require.NoError(t, f.Close()) - f, err = filePool.NewFile() + f, err = filePool.NewFile(nil, 0) require.NoError(t, err) n, err := f.WriteAt([]byte("Hello"), 100) require.Equal(t, 5, n) @@ -70,12 +70,29 @@ func TestFilePoolStatsBuildExecutorExample(t *testing.T) { } }) + // Mock all the file operations performed in the execution request. + filePool := mock.NewMockFilePool(ctrl) + file1 := mock.NewMockFileReadWriter(ctrl) + file2 := mock.NewMockFileReadWriter(ctrl) + + filePool.EXPECT().NewFile(nil, uint64(0)).Return(file1, nil) + file1.EXPECT().Truncate(int64(5)).Return(nil) + file1.EXPECT().Close().Return(nil) + filePool.EXPECT().NewFile(nil, uint64(0)).Return(file2, nil) + file2.EXPECT().WriteAt([]byte("Hello"), int64(100)).Return(5, nil) + file2.EXPECT().ReadAt(gomock.Any(), int64(98)).DoAndReturn(func(p []byte, offset int64) (int, error) { + copy(p, []byte("\x00\x00Hello\x00\x00\x00")) + return 7, io.EOF + }) + file2.EXPECT().Truncate(int64(42)).Return(nil) + file2.EXPECT().Close().Return(nil) + // Perform the execution request. 
executionStateUpdates := make(chan *remoteworker.CurrentState_Executing, 3) buildExecutor := builder.NewFilePoolStatsBuildExecutor(baseBuildExecutor) executeResponse := buildExecutor.Execute( ctx, - filesystem.InMemoryFilePool, + filePool, monitor, digest.MustNewFunction("hello", remoteexecution.DigestFunction_MD5), request, diff --git a/pkg/builder/local_build_executor.go b/pkg/builder/local_build_executor.go index a2a1c77d..811aaf64 100644 --- a/pkg/builder/local_build_executor.go +++ b/pkg/builder/local_build_executor.go @@ -8,8 +8,8 @@ import ( remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" re_clock "github.com/buildbarn/bb-remote-execution/pkg/clock" - re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" runner_pb "github.com/buildbarn/bb-remote-execution/pkg/proto/runner" "github.com/buildbarn/bb-storage/pkg/blobstore" @@ -127,7 +127,7 @@ func (be *localBuildExecutor) CheckReadiness(ctx context.Context) error { return err } -func (be *localBuildExecutor) Execute(ctx context.Context, filePool re_filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { +func (be *localBuildExecutor) Execute(ctx context.Context, filePool filepool.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { // Timeout handling. 
response := NewDefaultExecuteResponse(request) action := request.Action diff --git a/pkg/builder/logging_build_executor.go b/pkg/builder/logging_build_executor.go index e1917ae0..e4d75e14 100644 --- a/pkg/builder/logging_build_executor.go +++ b/pkg/builder/logging_build_executor.go @@ -6,8 +6,8 @@ import ( "net/url" remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" - re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" re_util "github.com/buildbarn/bb-remote-execution/pkg/util" "github.com/buildbarn/bb-storage/pkg/digest" @@ -31,7 +31,7 @@ func NewLoggingBuildExecutor(base BuildExecutor, browserURL *url.URL) BuildExecu } } -func (be *loggingBuildExecutor) Execute(ctx context.Context, filePool re_filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { +func (be *loggingBuildExecutor) Execute(ctx context.Context, filePool filepool.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { // Print URL to bb_browser prior to execution. 
if actionDigest, err := digestFunction.NewDigestFromProto(request.ActionDigest); err == nil { log.Printf("Action: %s with timeout %s", re_util.GetBrowserURL(be.browserURL, "action", actionDigest), request.Action.GetTimeout().AsDuration()) diff --git a/pkg/builder/metrics_build_executor.go b/pkg/builder/metrics_build_executor.go index 169bd572..a2d61719 100644 --- a/pkg/builder/metrics_build_executor.go +++ b/pkg/builder/metrics_build_executor.go @@ -5,8 +5,8 @@ import ( "sync" remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" - "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" "github.com/buildbarn/bb-remote-execution/pkg/proto/resourceusage" "github.com/buildbarn/bb-storage/pkg/digest" @@ -292,7 +292,7 @@ func observeTimestampDelta(histogram prometheus.Observer, pbStart, pbCompleted * histogram.Observe(pbCompleted.AsTime().Sub(pbStart.AsTime()).Seconds()) } -func (be *metricsBuildExecutor) Execute(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { +func (be *metricsBuildExecutor) Execute(ctx context.Context, filePool filepool.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { response := be.BuildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, executionStateUpdates) result, grpcCode := GetResultAndGRPCCodeFromExecuteResponse(response) diff --git a/pkg/builder/naive_build_directory.go 
b/pkg/builder/naive_build_directory.go index d36ac325..7e9e02b4 100644 --- a/pkg/builder/naive_build_directory.go +++ b/pkg/builder/naive_build_directory.go @@ -6,8 +6,8 @@ import ( "math" "github.com/buildbarn/bb-remote-execution/pkg/cas" - re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-storage/pkg/blobstore" "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" "github.com/buildbarn/bb-storage/pkg/digest" @@ -74,7 +74,7 @@ func (d *naiveBuildDirectory) EnterUploadableDirectory(name path.Component) (Upl return d.EnterBuildDirectory(name) } -func (d *naiveBuildDirectory) InstallHooks(filePool re_filesystem.FilePool, errorLogger util.ErrorLogger) { +func (d *naiveBuildDirectory) InstallHooks(filePool filepool.FilePool, errorLogger util.ErrorLogger) { // Simply ignore the provided hooks, as POSIX offers no way to // install them. This means no quota enforcement and detection // of I/O errors is performed. diff --git a/pkg/builder/noop_build_executor.go b/pkg/builder/noop_build_executor.go index 0a276443..1e933391 100644 --- a/pkg/builder/noop_build_executor.go +++ b/pkg/builder/noop_build_executor.go @@ -7,8 +7,8 @@ import ( "text/template" remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" - "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" re_util "github.com/buildbarn/bb-remote-execution/pkg/util" "github.com/buildbarn/bb-storage/pkg/blobstore" @@ -48,7 +48,7 @@ var defaultNoopErrorMessageTemplate = template.Must( template.New("NoopBuildExecutor"). Parse("Action has been uploaded, but will not be executed. 
Action details: {{ .ActionURL }}")) -func (be *noopBuildExecutor) Execute(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { +func (be *noopBuildExecutor) Execute(ctx context.Context, filePool filepool.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { // Obtain action digest, which can be embedded in the error message. response := NewDefaultExecuteResponse(request) actionDigest, err := digestFunction.NewDigestFromProto(request.ActionDigest) diff --git a/pkg/builder/prefetching_build_executor.go b/pkg/builder/prefetching_build_executor.go index 0df2ce9a..857192af 100644 --- a/pkg/builder/prefetching_build_executor.go +++ b/pkg/builder/prefetching_build_executor.go @@ -7,8 +7,8 @@ import ( remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" "github.com/buildbarn/bb-remote-execution/pkg/cas" - re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" "github.com/buildbarn/bb-storage/pkg/blobstore" "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" @@ -76,7 +76,7 @@ func (be *prefetchingBuildExecutor) computeProfile(monitor *access.BloomFilterCo } } -func (be *prefetchingBuildExecutor) Execute(ctx context.Context, filePool re_filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) 
*remoteexecution.ExecuteResponse { +func (be *prefetchingBuildExecutor) Execute(ctx context.Context, filePool filepool.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { // Obtain the reduced Action digest, which is needed to read // from, and write to the File System Access Cache (FSAC). action := request.Action diff --git a/pkg/builder/prefetching_build_executor_test.go b/pkg/builder/prefetching_build_executor_test.go index 57aa4bbb..0e1706a3 100644 --- a/pkg/builder/prefetching_build_executor_test.go +++ b/pkg/builder/prefetching_build_executor_test.go @@ -7,8 +7,8 @@ import ( remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" "github.com/buildbarn/bb-remote-execution/internal/mock" "github.com/buildbarn/bb-remote-execution/pkg/builder" - "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" "github.com/buildbarn/bb-remote-execution/pkg/proto/resourceusage" "github.com/buildbarn/bb-storage/pkg/blobstore/buffer" @@ -182,7 +182,7 @@ func TestPrefetchingBuildExecutor(t *testing.T) { digestFunction, testutil.EqProto(t, exampleRequest), executionStateUpdates, - ).DoAndReturn(func(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + ).DoAndReturn(func(ctx context.Context, filePool filepool.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- 
*remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { <-ctx.Done() require.Equal(t, context.Canceled, ctx.Err()) return &remoteexecution.ExecuteResponse{ @@ -224,7 +224,7 @@ func TestPrefetchingBuildExecutor(t *testing.T) { digestFunction, testutil.EqProto(t, exampleRequest), executionStateUpdates, - ).DoAndReturn(func(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + ).DoAndReturn(func(ctx context.Context, filePool filepool.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { <-ctx.Done() require.Equal(t, context.Canceled, ctx.Err()) return &remoteexecution.ExecuteResponse{ @@ -271,7 +271,7 @@ func TestPrefetchingBuildExecutor(t *testing.T) { digestFunction, testutil.EqProto(t, exampleRequest), executionStateUpdates, - ).DoAndReturn(func(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + ).DoAndReturn(func(ctx context.Context, filePool filepool.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { <-ctx.Done() require.Equal(t, context.Canceled, ctx.Err()) return &remoteexecution.ExecuteResponse{ @@ -416,7 +416,7 @@ func TestPrefetchingBuildExecutor(t *testing.T) { digestFunction, testutil.EqProto(t, exampleRequest), executionStateUpdates, - ).DoAndReturn(func(ctx 
context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + ).DoAndReturn(func(ctx context.Context, filePool filepool.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { monitor.ReadDirectory() return &remoteexecution.ExecuteResponse{ Result: &remoteexecution.ActionResult{ diff --git a/pkg/builder/storage_flushing_build_executor.go b/pkg/builder/storage_flushing_build_executor.go index fd48c26b..4cf564c4 100644 --- a/pkg/builder/storage_flushing_build_executor.go +++ b/pkg/builder/storage_flushing_build_executor.go @@ -4,8 +4,8 @@ import ( "context" remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" - re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" "github.com/buildbarn/bb-storage/pkg/digest" ) @@ -31,7 +31,7 @@ func NewStorageFlushingBuildExecutor(base BuildExecutor, flush StorageFlusher) B } } -func (be *storageFlushingBuildExecutor) Execute(ctx context.Context, filePool re_filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { +func (be *storageFlushingBuildExecutor) Execute(ctx context.Context, filePool filepool.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, 
executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { response := be.BuildExecutor.Execute(ctx, filePool, monitor, digestFunction, request, executionStateUpdates) if err := be.flush(ctx); err != nil { attachErrorToExecuteResponse(response, err) diff --git a/pkg/builder/test_infrastructure_failure_detecting_build_executor.go b/pkg/builder/test_infrastructure_failure_detecting_build_executor.go index 0fe76003..49fffe57 100644 --- a/pkg/builder/test_infrastructure_failure_detecting_build_executor.go +++ b/pkg/builder/test_infrastructure_failure_detecting_build_executor.go @@ -7,8 +7,8 @@ import ( "sync/atomic" remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" - "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" "github.com/buildbarn/bb-storage/pkg/digest" "github.com/buildbarn/bb-storage/pkg/program" @@ -105,7 +105,7 @@ func (be *testInfrastructureFailureDetectingBuildExecutor) CheckReadiness(ctx co }) } -func (be *testInfrastructureFailureDetectingBuildExecutor) Execute(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { +func (be *testInfrastructureFailureDetectingBuildExecutor) Execute(ctx context.Context, filePool filepool.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { if err := be.shutdownState.isShutDown(); err != nil { response := NewDefaultExecuteResponse(request) 
attachErrorToExecuteResponse(response, err) diff --git a/pkg/builder/timestamped_build_executor.go b/pkg/builder/timestamped_build_executor.go index d4248583..d9c6c56c 100644 --- a/pkg/builder/timestamped_build_executor.go +++ b/pkg/builder/timestamped_build_executor.go @@ -4,8 +4,8 @@ import ( "context" remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" - re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" "github.com/buildbarn/bb-storage/pkg/clock" "github.com/buildbarn/bb-storage/pkg/digest" @@ -38,7 +38,7 @@ func (be *timestampedBuildExecutor) getCurrentTime() *timestamppb.Timestamp { return timestamppb.New(be.clock.Now()) } -func (be *timestampedBuildExecutor) Execute(ctx context.Context, filePool re_filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { +func (be *timestampedBuildExecutor) Execute(ctx context.Context, filePool filepool.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { // Initial metadata, using the current time as the start timestamp. 
metadata := remoteexecution.ExecutedActionMetadata{ Worker: be.workerName, diff --git a/pkg/builder/timestamped_build_executor_test.go b/pkg/builder/timestamped_build_executor_test.go index c60457f2..9be9657f 100644 --- a/pkg/builder/timestamped_build_executor_test.go +++ b/pkg/builder/timestamped_build_executor_test.go @@ -8,8 +8,8 @@ import ( remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" "github.com/buildbarn/bb-remote-execution/internal/mock" "github.com/buildbarn/bb-remote-execution/pkg/builder" - "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" "github.com/buildbarn/bb-storage/pkg/digest" "github.com/buildbarn/bb-storage/pkg/testutil" @@ -69,7 +69,7 @@ func TestTimestampedBuildExecutorExample(t *testing.T) { monitor, digest.MustNewFunction("main", remoteexecution.DigestFunction_MD5), request, - gomock.Any()).DoAndReturn(func(ctx context.Context, filePool filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + gomock.Any()).DoAndReturn(func(ctx context.Context, filePool filepool.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { clock.EXPECT().Now().Return(time.Unix(1001, 0)) executionStateUpdates <- updateFetchingInputs clock.EXPECT().Now().Return(time.Unix(1002, 0)) diff --git a/pkg/builder/tracing_build_executor.go b/pkg/builder/tracing_build_executor.go index 4dab29e2..c8e79c9a 100644 --- a/pkg/builder/tracing_build_executor.go +++ 
b/pkg/builder/tracing_build_executor.go @@ -4,8 +4,8 @@ import ( "context" remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" - re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" "github.com/buildbarn/bb-storage/pkg/digest" @@ -29,7 +29,7 @@ func NewTracingBuildExecutor(buildExecutor BuildExecutor, tracerProvider trace.T } } -func (be *tracingBuildExecutor) Execute(ctx context.Context, filePool re_filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { +func (be *tracingBuildExecutor) Execute(ctx context.Context, filePool filepool.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { actionDigest := request.ActionDigest action := request.Action ctxWithTracing, span := be.tracer.Start(ctx, "BuildExecutor.Execute", trace.WithAttributes( diff --git a/pkg/builder/tracing_build_executor_test.go b/pkg/builder/tracing_build_executor_test.go index 897be759..64e35748 100644 --- a/pkg/builder/tracing_build_executor_test.go +++ b/pkg/builder/tracing_build_executor_test.go @@ -7,8 +7,8 @@ import ( remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" "github.com/buildbarn/bb-remote-execution/internal/mock" "github.com/buildbarn/bb-remote-execution/pkg/builder" - re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + 
"github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-remote-execution/pkg/proto/remoteworker" "github.com/buildbarn/bb-storage/pkg/digest" "github.com/buildbarn/bb-storage/pkg/testutil" @@ -65,7 +65,7 @@ func TestTracingBuildExecutor(t *testing.T) { monitor := mock.NewMockUnreadDirectoryMonitor(ctrl) digestFunction := digest.MustNewFunction("hello", remoteexecution.DigestFunction_SHA256) baseBuildExecutor.EXPECT().Execute(ctxWithTracing, filePool, monitor, digestFunction, testutil.EqProto(t, request), gomock.Any()).DoAndReturn( - func(ctx context.Context, filePool re_filesystem.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { + func(ctx context.Context, filePool filepool.FilePool, monitor access.UnreadDirectoryMonitor, digestFunction digest.Function, request *remoteworker.DesiredState_Executing, executionStateUpdates chan<- *remoteworker.CurrentState_Executing) *remoteexecution.ExecuteResponse { executionStateUpdates <- fetchingInputs executionStateUpdates <- running executionStateUpdates <- uploadingOutputs diff --git a/pkg/builder/virtual_build_directory.go b/pkg/builder/virtual_build_directory.go index d1e0a90e..3d1fe0ea 100644 --- a/pkg/builder/virtual_build_directory.go +++ b/pkg/builder/virtual_build_directory.go @@ -6,8 +6,8 @@ import ( "syscall" "github.com/buildbarn/bb-remote-execution/pkg/cas" - re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/access" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" "github.com/buildbarn/bb-storage/pkg/blobstore" "github.com/buildbarn/bb-storage/pkg/digest" @@ -78,7 +78,7 @@ func (d *virtualBuildDirectory) EnterUploadableDirectory(name 
path.Component) (U return d.EnterBuildDirectory(name) } -func (d *virtualBuildDirectory) InstallHooks(filePool re_filesystem.FilePool, errorLogger util.ErrorLogger) { +func (d *virtualBuildDirectory) InstallHooks(filePool filepool.FilePool, errorLogger util.ErrorLogger) { d.PrepopulatedDirectory.InstallHooks( virtual.NewHandleAllocatingFileAllocator( virtual.NewPoolBackedFileAllocator(filePool, errorLogger), diff --git a/pkg/filesystem/BUILD.bazel b/pkg/filesystem/BUILD.bazel index b41e3db9..4e37db74 100644 --- a/pkg/filesystem/BUILD.bazel +++ b/pkg/filesystem/BUILD.bazel @@ -4,26 +4,15 @@ go_library( name = "filesystem", srcs = [ "bitmap_sector_allocator.go", - "block_device_backed_file_pool.go", - "configuration.go", - "directory_backed_file_pool.go", - "empty_file_pool.go", - "file_pool.go", - "in_memory_file_pool.go", "lazy_directory.go", - "metrics_file_pool.go", - "quota_enforcing_file_pool.go", "sector_allocator.go", ], importpath = "github.com/buildbarn/bb-remote-execution/pkg/filesystem", visibility = ["//visibility:public"], deps = [ - "//pkg/proto/configuration/filesystem", - "@com_github_buildbarn_bb_storage//pkg/blockdevice", "@com_github_buildbarn_bb_storage//pkg/filesystem", "@com_github_buildbarn_bb_storage//pkg/filesystem/path", "@com_github_buildbarn_bb_storage//pkg/util", - "@com_github_prometheus_client_golang//prometheus", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//status", ], @@ -33,12 +22,7 @@ go_test( name = "filesystem_test", srcs = [ "bitmap_sector_allocator_test.go", - "block_device_backed_file_pool_test.go", - "directory_backed_file_pool_test.go", - "empty_file_pool_test.go", - "in_memory_file_pool_test.go", "lazy_directory_test.go", - "quota_enforcing_file_pool_test.go", ], deps = [ ":filesystem", diff --git a/pkg/filesystem/directory_backed_file_pool.go b/pkg/filesystem/directory_backed_file_pool.go deleted file mode 100644 index 2cbe1276..00000000 --- a/pkg/filesystem/directory_backed_file_pool.go +++ /dev/null @@ 
-1,106 +0,0 @@ -package filesystem - -import ( - "io" - "os" - "strconv" - "sync/atomic" - - "github.com/buildbarn/bb-storage/pkg/filesystem" - "github.com/buildbarn/bb-storage/pkg/filesystem/path" -) - -type directoryBackedFilePool struct { - directory filesystem.Directory - - nextID atomic.Uint64 -} - -// NewDirectoryBackedFilePool creates a FilePool that stores all data -// written to files into a single directory on disk. Files stored in the -// underlying directory are simply identified by an incrementing number. -// -// As many files may exist at a given point in time, this implementation -// does not keep any backing files open. This would exhaust the worker's -// file descriptor table. Files are opened on demand. -// -// TODO: Maybe use an eviction.Set to keep a small number of files open? -func NewDirectoryBackedFilePool(directory filesystem.Directory) FilePool { - return &directoryBackedFilePool{ - directory: directory, - } -} - -func (fp *directoryBackedFilePool) NewFile() (filesystem.FileReadWriter, error) { - return &lazyOpeningSelfDeletingFile{ - directory: fp.directory, - name: path.MustNewComponent(strconv.FormatUint(fp.nextID.Add(1), 10)), - }, nil -} - -// lazyOpeningSelfDeletingFile is a file descriptor that forwards -// operations to a file that is opened on demand. Upon closure, the -// underlying file is unlinked. -type lazyOpeningSelfDeletingFile struct { - directory filesystem.Directory - name path.Component -} - -func (f *lazyOpeningSelfDeletingFile) Close() error { - if err := f.directory.Remove(f.name); err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - -func (f *lazyOpeningSelfDeletingFile) GetNextRegionOffset(off int64, regionType filesystem.RegionType) (int64, error) { - fh, err := f.directory.OpenRead(f.name) - if os.IsNotExist(err) { - // Empty file that doesn't explicitly exist in the - // backing store yet. Treat it as if it's a zero-length - // file. 
- return 0, io.EOF - } else if err != nil { - return 0, err - } - defer fh.Close() - return fh.GetNextRegionOffset(off, regionType) -} - -func (f *lazyOpeningSelfDeletingFile) ReadAt(p []byte, off int64) (int, error) { - fh, err := f.directory.OpenRead(f.name) - if os.IsNotExist(err) { - // Empty file that doesn't explicitly exist in the - // backing store yet. Treat it as if it's a zero-length - // file. - return 0, io.EOF - } else if err != nil { - return 0, err - } - defer fh.Close() - return fh.ReadAt(p, off) -} - -func (f *lazyOpeningSelfDeletingFile) Sync() error { - // Because FilePool does not provide any persistency, there is - // no need to synchronize any data. - return nil -} - -func (f *lazyOpeningSelfDeletingFile) Truncate(size int64) error { - fh, err := f.directory.OpenWrite(f.name, filesystem.CreateReuse(0o600)) - if err != nil { - return err - } - defer fh.Close() - return fh.Truncate(size) -} - -func (f *lazyOpeningSelfDeletingFile) WriteAt(p []byte, off int64) (int, error) { - fh, err := f.directory.OpenWrite(f.name, filesystem.CreateReuse(0o600)) - if err != nil { - return 0, err - } - defer fh.Close() - return fh.WriteAt(p, off) -} diff --git a/pkg/filesystem/directory_backed_file_pool_test.go b/pkg/filesystem/directory_backed_file_pool_test.go deleted file mode 100644 index 8142ec81..00000000 --- a/pkg/filesystem/directory_backed_file_pool_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package filesystem_test - -import ( - "io" - "syscall" - "testing" - - "github.com/buildbarn/bb-remote-execution/internal/mock" - re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" - "github.com/buildbarn/bb-storage/pkg/filesystem" - "github.com/buildbarn/bb-storage/pkg/filesystem/path" - "github.com/stretchr/testify/require" - - "go.uber.org/mock/gomock" -) - -func TestDirectoryBackedFilePool(t *testing.T) { - ctrl := gomock.NewController(t) - - directory := mock.NewMockDirectory(ctrl) - fp := re_filesystem.NewDirectoryBackedFilePool(directory) - 
- t.Run("EmptyFile", func(t *testing.T) { - f, err := fp.NewFile() - require.NoError(t, err) - - // Underlying file should not yet exist. This should be - // interpreted as if the file is empty. - directory.EXPECT().OpenRead(path.MustNewComponent("1")).Return(nil, syscall.ENOENT) - var p [10]byte - n, err := f.ReadAt(p[:], 0) - require.Equal(t, 0, n) - require.Equal(t, io.EOF, err) - - // GetNextRegionOffset() should behave similarly. - directory.EXPECT().OpenRead(path.MustNewComponent("1")).Return(nil, syscall.ENOENT) - _, err = f.GetNextRegionOffset(0, filesystem.Data) - require.Equal(t, io.EOF, err) - - directory.EXPECT().OpenRead(path.MustNewComponent("1")).Return(nil, syscall.ENOENT) - _, err = f.GetNextRegionOffset(0, filesystem.Hole) - require.Equal(t, io.EOF, err) - - directory.EXPECT().Remove(path.MustNewComponent("1")).Return(syscall.ENOENT) - require.NoError(t, f.Close()) - }) - - t.Run("NonEmptyFile", func(t *testing.T) { - f, err := fp.NewFile() - require.NoError(t, err) - - // Write a piece of text into the file. - fileWriter := mock.NewMockFileWriter(ctrl) - directory.EXPECT().OpenWrite(path.MustNewComponent("2"), filesystem.CreateReuse(0o600)).Return(fileWriter, nil) - fileWriter.EXPECT().WriteAt([]byte("Hello, world"), int64(123)).Return(12, nil) - fileWriter.EXPECT().Close() - n, err := f.WriteAt([]byte("Hello, world"), 123) - require.Equal(t, 12, n) - require.NoError(t, err) - - // Truncate a part of it. - fileWriter = mock.NewMockFileWriter(ctrl) - directory.EXPECT().OpenWrite(path.MustNewComponent("2"), filesystem.CreateReuse(0o600)).Return(fileWriter, nil) - fileWriter.EXPECT().Truncate(int64(128)) - fileWriter.EXPECT().Close() - require.NoError(t, f.Truncate(128)) - - // Read back the end of the file. 
- fileReader := mock.NewMockFileReader(ctrl) - directory.EXPECT().OpenRead(path.MustNewComponent("2")).Return(fileReader, nil) - fileReader.EXPECT().ReadAt(gomock.Any(), int64(120)).DoAndReturn( - func(p []byte, off int64) (int, error) { - require.Len(t, p, 10) - copy(p, "\x00\x00\x00Hello") - return 8, io.EOF - }) - fileReader.EXPECT().Close() - var p [10]byte - n, err = f.ReadAt(p[:], 120) - require.Equal(t, 8, n) - require.Equal(t, io.EOF, err) - require.Equal(t, []byte("\x00\x00\x00Hello"), p[:8]) - - // Calls for GetNextRegionOffset() should be forwarded. - fileReader = mock.NewMockFileReader(ctrl) - directory.EXPECT().OpenRead(path.MustNewComponent("2")).Return(fileReader, nil) - fileReader.EXPECT().GetNextRegionOffset(int64(0), filesystem.Hole).Return(int64(123), nil) - fileReader.EXPECT().Close() - off, err := f.GetNextRegionOffset(0, filesystem.Hole) - require.NoError(t, err) - require.Equal(t, int64(123), off) - - directory.EXPECT().Remove(path.MustNewComponent("2")).Return(nil) - require.NoError(t, f.Close()) - }) -} diff --git a/pkg/filesystem/file_pool.go b/pkg/filesystem/file_pool.go deleted file mode 100644 index 1dc91206..00000000 --- a/pkg/filesystem/file_pool.go +++ /dev/null @@ -1,15 +0,0 @@ -package filesystem - -import ( - "github.com/buildbarn/bb-storage/pkg/filesystem" -) - -// FilePool is an allocator for temporary files. Files are created by -// calling NewFile(). They are automatically removed by calling Close(). -// -// File handles returned by NewFile() are not thread-safe. Additional -// locking needs to be done at higher levels to permit safe concurrent -// access. 
-type FilePool interface { - NewFile() (filesystem.FileReadWriter, error) -} diff --git a/pkg/filesystem/filepool/BUILD.bazel b/pkg/filesystem/filepool/BUILD.bazel new file mode 100644 index 00000000..d82c23eb --- /dev/null +++ b/pkg/filesystem/filepool/BUILD.bazel @@ -0,0 +1,49 @@ +load("@rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "filepool", + srcs = [ + "block_device_backed_file_pool.go", + "configuration.go", + "empty_file_pool.go", + "file_pool.go", + "metrics_file_pool.go", + "quota_enforcing_file_pool.go", + "simple_sparse_reader_at.go", + "sparse_reader_at.go", + "truncatable_sparse_reader_at.go", + ], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool", + visibility = ["//visibility:public"], + deps = [ + "//pkg/filesystem", + "//pkg/proto/configuration/filesystem", + "@com_github_buildbarn_bb_storage//pkg/blockdevice", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/util", + "@com_github_prometheus_client_golang//prometheus", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], +) + +go_test( + name = "filepool_test", + srcs = [ + "block_device_backed_file_pool_test.go", + "empty_file_pool_test.go", + "quota_enforcing_file_pool_test.go", + "simple_sparse_reader_at_test.go", + "truncatable_sparse_reader_at_test.go", + ], + deps = [ + ":filepool", + "//internal/mock", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/testutil", + "@com_github_stretchr_testify//require", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + "@org_uber_go_mock//gomock", + ], +) diff --git a/pkg/filesystem/block_device_backed_file_pool.go b/pkg/filesystem/filepool/block_device_backed_file_pool.go similarity index 73% rename from pkg/filesystem/block_device_backed_file_pool.go rename to pkg/filesystem/filepool/block_device_backed_file_pool.go index f3e92b1c..8432f57d 100644 --- 
a/pkg/filesystem/block_device_backed_file_pool.go +++ b/pkg/filesystem/filepool/block_device_backed_file_pool.go @@ -1,9 +1,11 @@ -package filesystem +package filepool import ( "fmt" "io" + "strings" + re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" "github.com/buildbarn/bb-storage/pkg/blockdevice" "github.com/buildbarn/bb-storage/pkg/filesystem" @@ -13,7 +15,7 @@ import ( type blockDeviceBackedFilePool struct { blockDevice blockdevice.BlockDevice - sectorAllocator SectorAllocator + sectorAllocator re_filesystem.SectorAllocator sectorSizeBytes int zeroSector []byte } @@ -23,7 +25,7 @@ type blockDeviceBackedFilePool struct { // device tends to be faster than using a directory on a file system, // for the reason that no metadata (e.g., a directory hierarchy and // inode attributes) needs to be stored. -func NewBlockDeviceBackedFilePool(blockDevice blockdevice.BlockDevice, sectorAllocator SectorAllocator, sectorSizeBytes int) FilePool { +func NewBlockDeviceBackedFilePool(blockDevice blockdevice.BlockDevice, sectorAllocator re_filesystem.SectorAllocator, sectorSizeBytes int) FilePool { return &blockDeviceBackedFilePool{ blockDevice: blockDevice, sectorAllocator: sectorAllocator, @@ -32,16 +34,31 @@ func NewBlockDeviceBackedFilePool(blockDevice blockdevice.BlockDevice, sectorAll } } -func (fp *blockDeviceBackedFilePool) NewFile() (filesystem.FileReadWriter, error) { - return &blockDeviceBackedFile{ - fp: fp, - }, nil +func (fp *blockDeviceBackedFilePool) NewFile(sparseReaderAt SparseReaderAt, initialSize uint64) (filesystem.FileReadWriter, error) { + var err error + if sparseReaderAt == nil { + if initialSize != 0 { + return nil, status.Errorf(codes.InvalidArgument, "initial size must be zero when sparseReaderAt is nil") + } + if sparseReaderAt, err = NewSimpleSparseReaderAt(strings.NewReader(""), nil, 0); err != nil { + return nil, status.Errorf(codes.Internal, "failed to create empty SparseReaderAt: %v", err) + } + } + fr := 
&blockDeviceBackedFile{ + fp: fp, + underlying: NewTruncatableSparseReaderAt(sparseReaderAt, int64(initialSize)), + } + if err = fr.Truncate(int64(initialSize)); err != nil { + return nil, status.Errorf(codes.Internal, "failed to truncate file to initial size: %v", err) + } + return fr, nil } type blockDeviceBackedFile struct { - fp *blockDeviceBackedFilePool - sizeBytes uint64 - sectors []uint32 + fp *blockDeviceBackedFilePool + underlying TruncatableSparseReaderAt + sizeBytes uint64 + sectors []uint32 } func (f *blockDeviceBackedFile) Close() error { @@ -114,7 +131,7 @@ func (f *blockDeviceBackedFile) limitBufferToSectorBoundary(p []byte, sectorCoun return p } -func (f *blockDeviceBackedFile) GetNextRegionOffset(off int64, regionType filesystem.RegionType) (int64, error) { +func (f *blockDeviceBackedFile) getNextRegionOffsetForOverlay(off int64, regionType filesystem.RegionType) (int64, error) { // Short circuit calls that are out of bounds. if off < 0 { return 0, status.Errorf(codes.InvalidArgument, "Negative seek offset: %d", off) @@ -164,6 +181,77 @@ func (f *blockDeviceBackedFile) GetNextRegionOffset(off int64, regionType filesy } } +func (f *blockDeviceBackedFile) GetNextRegionOffset(off int64, regionType filesystem.RegionType) (int64, error) { + // Short circuit calls that are out of bounds. + if off < 0 { + return 0, status.Errorf(codes.InvalidArgument, "Negative seek offset: %d", off) + } + if uint64(off) >= f.sizeBytes { + return 0, io.EOF + } + + // Data is represented by the existence of a written sector in + // either the overlay or the underlying file. Holes are represented + // by the absence of a written sector in the overlay _and_ a hole in + // the underlying file. + // + // For data this is the lowest valued offset of the two candidates. + // For holes it's the first position which both sources agree upon + // are holes. 
+ switch regionType { + case filesystem.Data: + data1, err := f.underlying.GetNextRegionOffset(off, filesystem.Data) + if err == io.EOF { + // No more data in the underlying file. Return the result + // from the overlay. + return f.getNextRegionOffsetForOverlay(off, filesystem.Data) + } + if err != nil { + return data1, status.Errorf(codes.Internal, "unexpected error while searching for data in underlying file: %v", err) + } + data2, err := f.getNextRegionOffsetForOverlay(off, filesystem.Data) + if err == io.EOF { + // No more data in the overlay, return the data from the + // underlying file. + return data1, nil + } + if err != nil { + return data2, status.Errorf(codes.Internal, "unexpected error while searching for data in underlying file: %v", err) + } + if data1 < data2 { + return data1, nil + } + return data2, nil + case filesystem.Hole: + for { + // Since we have already ruled out that we are past the EOF + // boundary no calls to GetNextRegionOffset should be + // capable of returning holes. + hole1, err := f.underlying.GetNextRegionOffset(off, filesystem.Hole) + if err != nil { + return hole1, status.Errorf(codes.Internal, "unexpected error while searching for hole in underlying file: %v", err) + } + hole2, err := f.getNextRegionOffsetForOverlay(off, filesystem.Hole) + if err != nil { + return hole2, status.Errorf(codes.Internal, "unexpected error while searching for hole in overlay file: %v", err) + } + if hole1 == hole2 { + // Both sources agree that it's a hole. + return hole1, nil + } + if hole1 == int64(f.sizeBytes) || hole2 == int64(f.sizeBytes) { + // The only possible hole is the implicit hole at the + // end of the file. + return int64(f.sizeBytes), nil + } + // Continue searching at the next possible offset. + off = max(hole1, hole2) + } + default: + panic("Unknown region type") + } +} + // readFromSectors performs a single read against the block device. 
It // attempts to read as much data into the output buffer as is possible // in a single read operation. If the file is fragmented, multiple reads @@ -171,23 +259,17 @@ func (f *blockDeviceBackedFile) GetNextRegionOffset(off int64, regionType filesy func (f *blockDeviceBackedFile) readFromSectors(p []byte, sectorIndex, lastSectorIndex, offsetWithinSector int) (int, error) { if sectorIndex >= len(f.sectors) { // Attempted to read from a hole located at the - // end of the file. Fill up all of the remaining - // space with zero bytes. - for i := 0; i < len(p); i++ { - p[i] = 0 - } - return len(p), nil + // end of the file. Delegate to ReadLayer. + offset := f.fp.sectorSizeBytes*sectorIndex + offsetWithinSector + return f.underlying.ReadAt(p, int64(offset)) } sector, sectorsToRead := f.getSectorsContiguous(sectorIndex, lastSectorIndex) p = f.limitBufferToSectorBoundary(p, sectorsToRead, offsetWithinSector) if sector == 0 { // Attempted to read from a sparse region of the file. - // Fill in zero bytes. - for i := 0; i < len(p); i++ { - p[i] = 0 - } - return len(p), nil + offset := f.fp.sectorSizeBytes*sectorIndex + offsetWithinSector + return f.underlying.ReadAt(p, int64(offset)) } // Attempted to read from a region of the file that contains @@ -266,6 +348,9 @@ func (f *blockDeviceBackedFile) Truncate(size int64) error { if size < 0 { return status.Errorf(codes.InvalidArgument, "Negative truncation size: %d", size) } + if err := f.underlying.Truncate(size); err != nil { + return status.Errorf(codes.Internal, "truncating the underlying file failed: %v", err) + } sectorIndex := int(size / int64(f.fp.sectorSizeBytes)) offsetWithinSector := int(size % int64(f.fp.sectorSizeBytes)) @@ -298,7 +383,7 @@ func (f *blockDeviceBackedFile) Truncate(size int64) error { // writeToNewSectors is used to write data into new sectors. This // function is called when holes in a sparse file are filled up or when // data is appended to the end of a file. 
-func (f *blockDeviceBackedFile) writeToNewSectors(p []byte, offsetWithinSector int) (int, uint32, int, error) { +func (f *blockDeviceBackedFile) writeToNewSectors(p []byte, fromSector, offsetWithinSector int) (int, uint32, int, error) { // Allocate space to store the data. sectorsToAllocate := int((uint64(offsetWithinSector) + uint64(len(p)) + uint64(f.fp.sectorSizeBytes) - 1) / uint64(f.fp.sectorSizeBytes)) firstSector, sectorsAllocated, err := f.fp.sectorAllocator.AllocateContiguous(sectorsToAllocate) @@ -313,10 +398,15 @@ func (f *blockDeviceBackedFile) writeToNewSectors(p []byte, offsetWithinSector i nWritten := len(p) // Write the first sector separately when we need to introduce - // leading zero padding. + // leading read layer padding. sector := firstSector if offsetWithinSector > 0 { buf := make([]byte, f.fp.sectorSizeBytes) + logicalOffset := fromSector * f.fp.sectorSizeBytes + if _, err := f.underlying.ReadAt(buf[:offsetWithinSector], int64(logicalOffset)); err != nil { + f.fp.sectorAllocator.FreeContiguous(firstSector, sectorsAllocated) + return 0, 0, 0, err + } nWritten := copy(buf[offsetWithinSector:], p) if _, err := f.fp.blockDevice.WriteAt(buf, f.toDeviceOffset(sector, 0)); err != nil { f.fp.sectorAllocator.FreeContiguous(firstSector, sectorsAllocated) @@ -339,9 +429,14 @@ func (f *blockDeviceBackedFile) writeToNewSectors(p []byte, offsetWithinSector i } // Write the last sector separately when we need to introduce - // trailing zero padding. + // trailing read layer padding. 
if len(p) > 0 { buf := make([]byte, f.fp.sectorSizeBytes) + logicalOffset := uint32(len(p)) + (sector-firstSector)*uint32(f.fp.sectorSizeBytes) + if _, err := f.underlying.ReadAt(buf[len(p):], int64(logicalOffset)); err != nil { + f.fp.sectorAllocator.FreeContiguous(firstSector, sectorsAllocated) + return 0, 0, 0, err + } copy(buf, p) if _, err := f.fp.blockDevice.WriteAt(buf, f.toDeviceOffset(sector, 0)); err != nil { + f.fp.sectorAllocator.FreeContiguous(firstSector, sectorsAllocated) @@ -374,7 +469,7 @@ func (f *blockDeviceBackedFile) writeToSectors(p []byte, sectorIndex, lastSector // Attempted to write past the end-of-file or within a // hole located at the end of a sparse file. Allocate // space and grow the file. - bytesWritten, firstSector, sectorsAllocated, err := f.writeToNewSectors(p, offsetWithinSector) + bytesWritten, firstSector, sectorsAllocated, err := f.writeToNewSectors(p, sectorIndex, offsetWithinSector) if err != nil { return 0, err } @@ -388,7 +483,7 @@ func (f *blockDeviceBackedFile) writeToSectors(p []byte, sectorIndex, lastSector if sector == 0 { // Attempted to write to a hole within a sparse file. // Allocate space and insert sectors into the file. - bytesWritten, firstSector, sectorsAllocated, err := f.writeToNewSectors(p, offsetWithinSector) + bytesWritten, firstSector, sectorsAllocated, err := f.writeToNewSectors(p, sectorIndex, offsetWithinSector) if err != nil { return 0, err } @@ -408,6 +503,13 @@ func (f *blockDeviceBackedFile) WriteAt(p []byte, off int64) (int, error) { if len(p) == 0 { return 0, nil } + // Truncate the file to a larger size if needed to accommodate the + // write.
+ if f.sizeBytes < uint64(off)+uint64(len(p)) { + if err := f.Truncate(off + int64(len(p))); err != nil { + return 0, err + } + } // As the file may be stored on disk non-contiguously or may be // a sparse file with holes, the write may need to be decomposed diff --git a/pkg/filesystem/block_device_backed_file_pool_test.go b/pkg/filesystem/filepool/block_device_backed_file_pool_test.go similarity index 66% rename from pkg/filesystem/block_device_backed_file_pool_test.go rename to pkg/filesystem/filepool/block_device_backed_file_pool_test.go index dac1bc36..a676e28d 100644 --- a/pkg/filesystem/block_device_backed_file_pool_test.go +++ b/pkg/filesystem/filepool/block_device_backed_file_pool_test.go @@ -1,12 +1,13 @@ -package filesystem_test +package filepool_test import ( "io" "math" + "strings" "testing" "github.com/buildbarn/bb-remote-execution/internal/mock" - re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-storage/pkg/filesystem" "github.com/buildbarn/bb-storage/pkg/testutil" "github.com/stretchr/testify/require" @@ -22,11 +23,11 @@ func TestBlockDeviceBackedFilePool(t *testing.T) { blockDevice := mock.NewMockBlockDevice(ctrl) sectorAllocator := mock.NewMockSectorAllocator(ctrl) - pool := re_filesystem.NewBlockDeviceBackedFilePool(blockDevice, sectorAllocator, 16) + pool := filepool.NewBlockDeviceBackedFilePool(blockDevice, sectorAllocator, 16) t.Run("ReadEmptyFile", func(t *testing.T) { // Test that reads on an empty file work as expected. - f, err := pool.NewFile() + f, err := pool.NewFile(nil, 0) require.NoError(t, err) var p [10]byte @@ -54,7 +55,7 @@ func TestBlockDeviceBackedFilePool(t *testing.T) { }) t.Run("Truncate", func(t *testing.T) { - f, err := pool.NewFile() + f, err := pool.NewFile(nil, 0) require.NoError(t, err) // Invalid size. 
@@ -102,7 +103,7 @@ func TestBlockDeviceBackedFilePool(t *testing.T) { }) t.Run("WritesAndReadOnSingleSector", func(t *testing.T) { - f, err := pool.NewFile() + f, err := pool.NewFile(nil, 0) require.NoError(t, err) // The initial write to a sector should cause the full @@ -139,7 +140,7 @@ func TestBlockDeviceBackedFilePool(t *testing.T) { }) t.Run("WriteFragmentation", func(t *testing.T) { - f, err := pool.NewFile() + f, err := pool.NewFile(nil, 0) require.NoError(t, err) // Simulate the case where 137 bytes of data needs to be @@ -172,7 +173,7 @@ func TestBlockDeviceBackedFilePool(t *testing.T) { }) t.Run("WriteSectorAllocatorFailure", func(t *testing.T) { - f, err := pool.NewFile() + f, err := pool.NewFile(nil, 0) require.NoError(t, err) // Failure to allocate sectors should cause the write to @@ -192,7 +193,7 @@ func TestBlockDeviceBackedFilePool(t *testing.T) { }) t.Run("WriteIOFailure", func(t *testing.T) { - f, err := pool.NewFile() + f, err := pool.NewFile(nil, 0) require.NoError(t, err) // Write failures to freshly allocated sectors should @@ -215,7 +216,7 @@ func TestBlockDeviceBackedFilePool(t *testing.T) { t.Run("GetNextRegionOffset", func(t *testing.T) { // Test the behavior on empty files. 
- f, err := pool.NewFile() + f, err := pool.NewFile(nil, 0) require.NoError(t, err) _, err = f.GetNextRegionOffset(-1, filesystem.Data) @@ -313,10 +314,172 @@ func TestBlockDeviceBackedFilePool(t *testing.T) { }) t.Run("WriteAt", func(t *testing.T) { - f, err := pool.NewFile() + f, err := pool.NewFile(nil, 0) require.NoError(t, err) _, err = f.WriteAt([]byte{0}, -1) testutil.RequireEqualStatus(t, status.Error(codes.InvalidArgument, "Negative write offset: -1"), err) }) } + +func TestFilePoolWithSparseUnderlyingFile(t *testing.T) { + ctrl := gomock.NewController(t) + blockDevice := mock.NewMockBlockDevice(ctrl) + sectorAllocator := mock.NewMockSectorAllocator(ctrl) + pool := filepool.NewBlockDeviceBackedFilePool(blockDevice, sectorAllocator, 16) + + t.Run("ReadFromHoleReturnsBackingData", func(t *testing.T) { + sparseReaderAt, err := filepool.NewSimpleSparseReaderAt(strings.NewReader("HelloWorld"), nil, 10) + require.NoError(t, err) + file, err := pool.NewFile(sparseReaderAt, 10) + require.NoError(t, err) + + buf := make([]byte, 10) + n, err := file.ReadAt(buf, 0) + require.Equal(t, err, io.EOF) + require.Equal(t, 10, n) + require.Equal(t, []byte("HelloWorld"), buf) + + require.NoError(t, file.Close()) + }) + + t.Run("CannotReadFromHoleBeyondBacking", func(t *testing.T) { + sparseReaderAt, err := filepool.NewSimpleSparseReaderAt(strings.NewReader("abc"), nil, 3) + require.NoError(t, err) + file, err := pool.NewFile(sparseReaderAt, 3) + require.NoError(t, err) + + buf := make([]byte, 6) + n, err := file.ReadAt(buf, 0) + require.Equal(t, err, io.EOF) + require.Equal(t, 3, n) + require.Equal(t, []byte("abc\x00\x00\x00"), buf) + + require.NoError(t, file.Close()) + }) + + t.Run("TruncatePropagatesToBackingLayer", func(t *testing.T) { + sparseReaderAt, err := filepool.NewSimpleSparseReaderAt(strings.NewReader("abcdefhij"), nil, 10) + require.NoError(t, err) + file, err := pool.NewFile(sparseReaderAt, 10) + require.NoError(t, err) + + require.NoError(t, file.Truncate(4)) 
+ buf := make([]byte, 6) + n, err := file.ReadAt(buf, 0) + require.Equal(t, 4, n) + require.Equal(t, io.EOF, err) + require.Equal(t, []byte("abcd\x00\x00"), buf) + + require.NoError(t, file.Close()) + }) + + t.Run("TruncateHasNoGhosting", func(t *testing.T) { + sparseReaderAt, err := filepool.NewSimpleSparseReaderAt(strings.NewReader("abcdefhij"), nil, 10) + require.NoError(t, err) + file, err := pool.NewFile(sparseReaderAt, 10) + require.NoError(t, err) + // shrunk to 4 bytes + require.NoError(t, file.Truncate(4)) + buf := make([]byte, 6) + n, err := file.ReadAt(buf, 0) + require.Equal(t, 4, n) + require.Equal(t, io.EOF, err) + require.Equal(t, []byte("abcd\x00\x00"), buf) + // grow to 10 bytes + require.NoError(t, file.Truncate(10)) + n, err = file.ReadAt(buf, 0) + require.Equal(t, 6, n) + require.NoError(t, err) + require.Equal(t, []byte("abcd\x00\x00"), buf) + + require.NoError(t, file.Close()) + }) + + t.Run("WriteOverridesBackingLayer", func(t *testing.T) { + // Use 4 byte sectors for clarity. + pool := filepool.NewBlockDeviceBackedFilePool(blockDevice, sectorAllocator, 4) + // Each letter covers a full sector. + sparseReaderAt, err := filepool.NewSimpleSparseReaderAt(strings.NewReader("AAAABBBBCCCC"), nil, 12) + file, err := pool.NewFile(sparseReaderAt, 12) + require.NoError(t, err) + + // Write ZZZ at offset 2, this spans the first two sectors which + // requires us to bring them into our sparse file. We will allocate + // sector 10 and 11 of our block device for this (address 36 and 40). + sectorAllocator.EXPECT().AllocateContiguous(2).Return(uint32(10), 2, nil) + blockDevice.EXPECT().WriteAt([]byte("AAZZ"), int64(36)).Return(4, nil) + blockDevice.EXPECT().WriteAt([]byte("ZBBB"), int64(40)).Return(4, nil) + n, err := file.WriteAt([]byte("ZZZ"), 2) + require.NoError(t, err) + require.Equal(t, 3, n) + + // This data would then be expected to be read back to us. 
+ blockDevice.EXPECT().ReadAt(gomock.Len(8), int64(36)).DoAndReturn( + func(p []byte, offset int64) (int, error) { + copy(p, []byte("AAZZZBBB")) + return 8, nil + }, + ) + + buf := make([]byte, 12) + n, err = file.ReadAt(buf, 0) + require.Equal(t, io.EOF, err) + require.Equal(t, 12, n) + require.Equal(t, []byte("AAZZZBBBCCCC"), buf) + + sectorAllocator.EXPECT().FreeList([]uint32{10, 11}) + require.NoError(t, file.Close()) + }) + + t.Run("EffectivelyDense", func(t *testing.T) { + // Use 2 byte sectors. + pool := filepool.NewBlockDeviceBackedFilePool(blockDevice, sectorAllocator, 2) + // Use the sparse string 'xxBBxxDD' for the underlying. + sparseReader, err := filepool.NewSimpleSparseReaderAt(strings.NewReader("\x00\x00BB\x00\x00DD"), []filepool.Range{{Off: 0, Len: 2}, {Off: 4, Len: 2}}, 8) + require.NoError(t, err) + file, err := pool.NewFile(sparseReader, 8) + require.NoError(t, err) + + // Write exactly AA and DD to the overlay. + sectorAllocator.EXPECT().AllocateContiguous(1).Return(uint32(10), 1, nil) + sectorAllocator.EXPECT().AllocateContiguous(1).Return(uint32(11), 1, nil) + blockDevice.EXPECT().WriteAt([]byte("AA"), int64(18)).Return(2, nil) + blockDevice.EXPECT().WriteAt([]byte("CC"), int64(20)).Return(2, nil) + n, err := file.WriteAt([]byte("AA"), 0) + require.NoError(t, err) + require.Equal(t, n, 2) + n, err = file.WriteAt([]byte("CC"), 4) + require.NoError(t, err) + require.Equal(t, n, 2) + + // Read it back. 
+ blockDevice.EXPECT().ReadAt(gomock.Len(2), int64(18)).DoAndReturn( + func(p []byte, offset int64) (int, error) { + copy(p, []byte("AA")) + return 2, nil + }, + ) + blockDevice.EXPECT().ReadAt(gomock.Len(2), int64(20)).DoAndReturn( + func(p []byte, offset int64) (int, error) { + copy(p, []byte("CC")) + return 2, nil + }, + ) + + buf := make([]byte, 8) + n, err = file.ReadAt(buf, 0) + require.Equal(t, 8, n) + require.Equal(t, []byte("AABBCCDD"), buf) + + // Verify sparsity + for i := int64(0); i < 8; i++ { + o, err := file.GetNextRegionOffset(i, filesystem.Data) + require.NoError(t, err) + require.Equal(t, i, o) + o, err = file.GetNextRegionOffset(i, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, int64(8), o) + } + }) +} diff --git a/pkg/filesystem/configuration.go b/pkg/filesystem/filepool/configuration.go similarity index 66% rename from pkg/filesystem/configuration.go rename to pkg/filesystem/filepool/configuration.go index 94850c56..50660d83 100644 --- a/pkg/filesystem/configuration.go +++ b/pkg/filesystem/filepool/configuration.go @@ -1,12 +1,11 @@ -package filesystem +package filepool import ( "math" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem" pb "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/filesystem" "github.com/buildbarn/bb-storage/pkg/blockdevice" - "github.com/buildbarn/bb-storage/pkg/filesystem" - "github.com/buildbarn/bb-storage/pkg/filesystem/path" "github.com/buildbarn/bb-storage/pkg/util" "google.golang.org/grpc/codes" @@ -25,18 +24,6 @@ func NewFilePoolFromConfiguration(configuration *pb.FilePoolConfiguration) (File var filePool FilePool switch backend := configuration.Backend.(type) { - case *pb.FilePoolConfiguration_InMemory: - filePool = InMemoryFilePool - case *pb.FilePoolConfiguration_DirectoryPath: - directory, err := filesystem.NewLocalDirectory(path.LocalFormat.NewParser(backend.DirectoryPath)) - if err != nil { - return nil, util.StatusWrapf(err, "Failed to open directory %#v", 
backend.DirectoryPath) - } - if err := directory.RemoveAllChildren(); err != nil { - directory.Close() - return nil, util.StatusWrapf(err, "Failed to empty out directory %#v", backend.DirectoryPath) - } - filePool = NewDirectoryBackedFilePool(directory) case *pb.FilePoolConfiguration_BlockDevice: blockDevice, sectorSizeBytes, sectorCount, err := blockdevice.NewBlockDeviceFromConfiguration(backend.BlockDevice, true) if err != nil { @@ -47,7 +34,7 @@ func NewFilePoolFromConfiguration(configuration *pb.FilePoolConfiguration) (File } filePool = NewBlockDeviceBackedFilePool( blockDevice, - NewBitmapSectorAllocator(uint32(sectorCount)), + filesystem.NewBitmapSectorAllocator(uint32(sectorCount)), sectorSizeBytes) default: return nil, status.Error(codes.InvalidArgument, "Configuration did not contain a supported file pool backend") diff --git a/pkg/filesystem/empty_file_pool.go b/pkg/filesystem/filepool/empty_file_pool.go similarity index 83% rename from pkg/filesystem/empty_file_pool.go rename to pkg/filesystem/filepool/empty_file_pool.go index 1e60af32..67fc33a8 100644 --- a/pkg/filesystem/empty_file_pool.go +++ b/pkg/filesystem/filepool/empty_file_pool.go @@ -1,4 +1,4 @@ -package filesystem +package filepool import ( "github.com/buildbarn/bb-storage/pkg/filesystem" @@ -9,7 +9,7 @@ import ( type emptyFilePool struct{} -func (fp emptyFilePool) NewFile() (filesystem.FileReadWriter, error) { +func (fp emptyFilePool) NewFile(SparseReaderAt, uint64) (filesystem.FileReadWriter, error) { return nil, status.Error(codes.ResourceExhausted, "Cannot create file in empty file pool") } diff --git a/pkg/filesystem/empty_file_pool_test.go b/pkg/filesystem/filepool/empty_file_pool_test.go similarity index 66% rename from pkg/filesystem/empty_file_pool_test.go rename to pkg/filesystem/filepool/empty_file_pool_test.go index e5696fa3..54a6a581 100644 --- a/pkg/filesystem/empty_file_pool_test.go +++ b/pkg/filesystem/filepool/empty_file_pool_test.go @@ -1,9 +1,9 @@ -package filesystem_test 
+package filepool_test import ( "testing" - "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" @@ -11,6 +11,6 @@ import ( ) func TestEmptyFilePool(t *testing.T) { - _, err := filesystem.EmptyFilePool.NewFile() + _, err := filepool.EmptyFilePool.NewFile(nil, 0) require.Equal(t, err, status.Error(codes.ResourceExhausted, "Cannot create file in empty file pool")) } diff --git a/pkg/filesystem/filepool/file_pool.go b/pkg/filesystem/filepool/file_pool.go new file mode 100644 index 00000000..9b40899c --- /dev/null +++ b/pkg/filesystem/filepool/file_pool.go @@ -0,0 +1,21 @@ +package filepool + +import ( + "github.com/buildbarn/bb-storage/pkg/filesystem" +) + +// FilePool is an allocator for temporary files. Files are created by +// calling NewFile(). They are automatically removed by calling Close(). +// +// File handles returned by NewFile() are not thread-safe. Additional +// locking needs to be done at higher levels to permit safe concurrent +// access. +// +// If the sparseReaderAt parameter is not nil the file is created as if +// it had the initial content of the supplied SparseReaderAt. The size +// parameter must be less than or equal to the size of the underlying +// data. If the sparseReaderAt parameter is nil the file is created +// empty. 
+type FilePool interface { + NewFile(sparseReaderAt SparseReaderAt, size uint64) (filesystem.FileReadWriter, error) +} diff --git a/pkg/filesystem/metrics_file_pool.go b/pkg/filesystem/filepool/metrics_file_pool.go similarity index 88% rename from pkg/filesystem/metrics_file_pool.go rename to pkg/filesystem/filepool/metrics_file_pool.go index 7938cd96..377ec358 100644 --- a/pkg/filesystem/metrics_file_pool.go +++ b/pkg/filesystem/filepool/metrics_file_pool.go @@ -1,4 +1,4 @@ -package filesystem +package filepool import ( "sync" @@ -43,8 +43,8 @@ func NewMetricsFilePool(base FilePool) FilePool { } } -func (fp *metricsFilePool) NewFile() (filesystem.FileReadWriter, error) { - f, err := fp.base.NewFile() +func (fp *metricsFilePool) NewFile(sparseReaderAt SparseReaderAt, size uint64) (filesystem.FileReadWriter, error) { + f, err := fp.base.NewFile(sparseReaderAt, size) if err != nil { return nil, err } diff --git a/pkg/filesystem/quota_enforcing_file_pool.go b/pkg/filesystem/filepool/quota_enforcing_file_pool.go similarity index 94% rename from pkg/filesystem/quota_enforcing_file_pool.go rename to pkg/filesystem/filepool/quota_enforcing_file_pool.go index 0b071312..2879c965 100644 --- a/pkg/filesystem/quota_enforcing_file_pool.go +++ b/pkg/filesystem/filepool/quota_enforcing_file_pool.go @@ -1,4 +1,4 @@ -package filesystem +package filepool import ( "sync/atomic" @@ -53,11 +53,11 @@ func NewQuotaEnforcingFilePool(base FilePool, maximumFileCount, maximumTotalSize return fp } -func (fp *quotaEnforcingFilePool) NewFile() (filesystem.FileReadWriter, error) { +func (fp *quotaEnforcingFilePool) NewFile(sparseReaderAt SparseReaderAt, size uint64) (filesystem.FileReadWriter, error) { if !fp.filesRemaining.allocate(1) { return nil, status.Error(codes.InvalidArgument, "File count quota reached") } - f, err := fp.base.NewFile() + f, err := fp.base.NewFile(sparseReaderAt, size) if err != nil { fp.filesRemaining.release(1) return nil, err diff --git 
a/pkg/filesystem/quota_enforcing_file_pool_test.go b/pkg/filesystem/filepool/quota_enforcing_file_pool_test.go similarity index 87% rename from pkg/filesystem/quota_enforcing_file_pool_test.go rename to pkg/filesystem/filepool/quota_enforcing_file_pool_test.go index d41363fa..1409a495 100644 --- a/pkg/filesystem/quota_enforcing_file_pool_test.go +++ b/pkg/filesystem/filepool/quota_enforcing_file_pool_test.go @@ -1,11 +1,11 @@ -package filesystem_test +package filepool_test import ( "io" "testing" "github.com/buildbarn/bb-remote-execution/internal/mock" - re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-storage/pkg/filesystem" "github.com/stretchr/testify/require" @@ -18,19 +18,19 @@ import ( // testRemainingQuota is a helper function for the // QuotaEnforcingFilePool tests to check that a certain amount of space // is available within the pool. -func testRemainingQuota(t *testing.T, ctrl *gomock.Controller, underlyingPool *mock.MockFilePool, pool re_filesystem.FilePool, filesRemaining int, bytesRemaining int64) { +func testRemainingQuota(t *testing.T, ctrl *gomock.Controller, underlyingPool *mock.MockFilePool, pool filepool.FilePool, filesRemaining int, bytesRemaining int64) { // Check that the remaining number of files is available by // allocating all of them. 
underlyingFiles := make([]*mock.MockFileReadWriter, filesRemaining) files := make([]filesystem.FileReadWriter, filesRemaining) for i := 0; i < filesRemaining; i++ { underlyingFiles[i] = mock.NewMockFileReadWriter(ctrl) - underlyingPool.EXPECT().NewFile().Return(underlyingFiles[i], nil) + underlyingPool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFiles[i], nil) var err error - files[i], err = pool.NewFile() + files[i], err = pool.NewFile(nil, 0) require.NoError(t, err) } - _, err := pool.NewFile() + _, err := pool.NewFile(nil, 0) require.Equal(t, err, status.Error(codes.InvalidArgument, "File count quota reached")) for i := 0; i < filesRemaining; i++ { underlyingFiles[i].EXPECT().Close().Return(nil) @@ -40,8 +40,8 @@ func testRemainingQuota(t *testing.T, ctrl *gomock.Controller, underlyingPool *m // Check that the remaining amount of space is available by // allocating one file and truncating it to the exact size. underlyingFile := mock.NewMockFileReadWriter(ctrl) - underlyingPool.EXPECT().NewFile().Return(underlyingFile, nil) - f, err := pool.NewFile() + underlyingPool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFile, nil) + f, err := pool.NewFile(nil, 0) require.NoError(t, err) if bytesRemaining != 0 { underlyingFile.EXPECT().Truncate(bytesRemaining).Return(nil) @@ -57,20 +57,20 @@ func TestQuotaEnforcingFilePoolExample(t *testing.T) { // An empty pool should have the advertised amount of space available. underlyingPool := mock.NewMockFilePool(ctrl) - pool := re_filesystem.NewQuotaEnforcingFilePool(underlyingPool, 10, 1000) + pool := filepool.NewQuotaEnforcingFilePool(underlyingPool, 10, 1000) testRemainingQuota(t, ctrl, underlyingPool, pool, 10, 1000) // Failure to allocate a file from the underlying pool should // not affect the quota. 
- underlyingPool.EXPECT().NewFile().Return(nil, status.Error(codes.Internal, "I/O error")) - _, err := pool.NewFile() + underlyingPool.EXPECT().NewFile(nil, uint64(0)).Return(nil, status.Error(codes.Internal, "I/O error")) + _, err := pool.NewFile(nil, 0) require.Equal(t, err, status.Error(codes.Internal, "I/O error")) testRemainingQuota(t, ctrl, underlyingPool, pool, 10, 1000) // Successfully allocate a file. underlyingFile := mock.NewMockFileReadWriter(ctrl) - underlyingPool.EXPECT().NewFile().Return(underlyingFile, nil) - f, err := pool.NewFile() + underlyingPool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFile, nil) + f, err := pool.NewFile(nil, 0) require.NoError(t, err) testRemainingQuota(t, ctrl, underlyingPool, pool, 9, 1000) diff --git a/pkg/filesystem/filepool/simple_sparse_reader_at.go b/pkg/filesystem/filepool/simple_sparse_reader_at.go new file mode 100644 index 00000000..7d349d6d --- /dev/null +++ b/pkg/filesystem/filepool/simple_sparse_reader_at.go @@ -0,0 +1,130 @@ +package filepool + +import ( + "io" + "sort" + + "github.com/buildbarn/bb-storage/pkg/filesystem" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ReaderAt is type alias of io.ReaderAt used for mock generation. +type ReaderAt = io.ReaderAt + +// Range describes an offset interval. Used by SimpleSparseReaderAt to +// describe the holes in the io.ReaderAt. +type Range struct { + Off int64 + Len int64 +} + +// NewSimpleSparseReaderAt creates a SparseReaderAt from an io.ReaderAt +// and a list of holes. +// +// Note: This decorates io.ReaderAt with the necessary metadata to +// fulfill the SparseReaderAt interface. It doesn't perform the zeroing +// of reads in holes or padding of reads past the EOF boundary. It is +// assumed that the underlying io.ReaderAt already does that. 
+func NewSimpleSparseReaderAt(reader io.ReaderAt, holes []Range, sizeBytes int64) (SparseReaderAt, error) { + sort.Slice(holes, func(i, j int) bool { + return holes[i].Off < holes[j].Off + }) + for _, hole := range holes { + if hole.Len < 0 { + return nil, status.Errorf(codes.InvalidArgument, "invalid hole: %v", hole) + } + if hole.Off > sizeBytes { + return nil, status.Errorf(codes.InvalidArgument, "hole out of bounds: %v", hole) + } + } + // Explicitly add the implicit hole at the end of file to simplify + // the code. + holes = append(holes, Range{Off: sizeBytes, Len: 1}) + // Reduce to simplest representation by merging adjacent holes. + reducedHoles := make([]Range, 0, len(holes)) + prev := holes[0] + for i := 1; i < len(holes); i++ { + hole := holes[i] + prevEnd := prev.Off + prev.Len + // This hole starts before or is adjacent to the previous hole. + if hole.Off <= prevEnd { + end := hole.Off + hole.Len + // Extend the previous hole to the end of this hole. + if end > prevEnd { + prev.Len += end - prevEnd + } + continue + } + // Save it in the reduced set only if it is not a zero length + // hole. + if prev.Len > 0 { + reducedHoles = append(reducedHoles, prev) + } + prev = hole + } + reducedHoles = append(reducedHoles, prev) + return &sparseReaderAt{ + reader: reader, + holes: reducedHoles, + sizeBytes: sizeBytes, + }, nil +} + +type sparseReaderAt struct { + reader io.ReaderAt + holes []Range + sizeBytes int64 +} + +func (s *sparseReaderAt) ReadAt(p []byte, off int64) (int, error) { + n, err := s.reader.ReadAt(p, off) + return n, err +} + +func (s *sparseReaderAt) GetNextRegionOffset(offset int64, regionType filesystem.RegionType) (int64, error) { + // Find the offset of the first hole or data >= offset. Resolves it + // by binary searching for the two surrounding holes. Since we + // add the implicit hole at the end of file to the list of holes in + // the constructor we will always have at least one hole in the list. 
+ if offset < 0 { + return 0, status.Errorf(codes.InvalidArgument, "negative offset: %d", offset) + } + // Out of bounds. + if offset >= s.sizeBytes { + return 0, io.EOF + } + // Index of first hole with offset greater than the given offset. + nextIndex := sort.Search(len(s.holes), func(i int) bool { + return s.holes[i].Off > offset + }) + // Index of the last hole with offset less than or equal to the + // given offset. + prevIndex := nextIndex - 1 + switch regionType { + case filesystem.Hole: + if prevIndex == -1 { + return s.holes[0].Off, nil + } + prev := s.holes[prevIndex] + if prev.Off+prev.Len > offset { + return offset, nil + } + return s.holes[nextIndex].Off, nil + case filesystem.Data: + if prevIndex == -1 { + return offset, nil + } + prev := s.holes[prevIndex] + after := prev.Off + prev.Len + if after >= s.sizeBytes { + return 0, io.EOF + } + if after > offset { + return after, nil + } + return offset, nil + default: + return 0, status.Errorf(codes.InvalidArgument, "unknown region type: %v", regionType) + } +} diff --git a/pkg/filesystem/filepool/simple_sparse_reader_at_test.go b/pkg/filesystem/filepool/simple_sparse_reader_at_test.go new file mode 100644 index 00000000..b9c4dbb3 --- /dev/null +++ b/pkg/filesystem/filepool/simple_sparse_reader_at_test.go @@ -0,0 +1,77 @@ +package filepool_test + +import ( + "io" + "testing" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +func TestSimpleSparseReaderAt(t *testing.T) { + ctrl := gomock.NewController(t) + readerAt := mock.NewMockReaderAt(ctrl) + t.Run("TestGetNextRegionOffset", func(t *testing.T) { + // All permutations of holes in a 4 character string. Represented + // by the bitmask of holes (values 0b0000 to 0b1111). 
+ for i := range 1 << 4 { + mask := (^uint16(0) >> 4 << 4) | uint16(i) + isHole := func(index int) bool { + return mask&(1<= uint64(r.logicalSize) { + return 0, io.EOF + } + var success error + if end := off + int64(len(p)); end >= r.logicalSize { + success = io.EOF + p = p[:r.logicalSize-off] + } + + // Bytes to read from the underlying stream. + n1 := min(int64(len(p)), max(r.sizeBytes-off, 0)) + n2, err := r.readUnderlyingSuppressEOF(p[:n1], off) + if int64(n2) != n1 || err != nil { + return n2, err + } + for i := range len(p[n1:]) { + p[n1+int64(i)] = 0 + } + return len(p), success +} + +func (r *truncatableSparseReaderAt) GetNextRegionOffset(offset int64, regionType filesystem.RegionType) (int64, error) { + // For indexes within r.sizeBytes this interface has holes where the + // underlying interface has holes and data where the underlying + // interface has data. For indexes outside of r.sizeBytes this + // interface is all holes. + if offset < 0 { + return 0, status.Errorf(codes.InvalidArgument, "negative offset %d", offset) + } + if offset >= r.logicalSize { + return 0, io.EOF + } + switch regionType { + case filesystem.Data: + if offset < r.sizeBytes { + innerOffset, err := r.base.GetNextRegionOffset(offset, regionType) + if innerOffset < r.sizeBytes { + return innerOffset, err + } + } + return 0, io.EOF + case filesystem.Hole: + if offset < r.sizeBytes { + innerOffset, err := r.base.GetNextRegionOffset(offset, regionType) + if innerOffset < r.sizeBytes { + return innerOffset, err + } + } + return max(offset, r.sizeBytes), nil + default: + return 0, status.Errorf(codes.InvalidArgument, "unknown region type %d", regionType) + } +} diff --git a/pkg/filesystem/filepool/truncatable_sparse_reader_at_test.go b/pkg/filesystem/filepool/truncatable_sparse_reader_at_test.go new file mode 100644 index 00000000..f2e39622 --- /dev/null +++ b/pkg/filesystem/filepool/truncatable_sparse_reader_at_test.go @@ -0,0 +1,200 @@ +package filepool_test + +import ( + "io" + 
"strings" + "testing" + + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/stretchr/testify/require" +) + +func TestTruncatableSparseReaderAt_ReadAt(t *testing.T) { + text := "Hello World!" + underlyingSize := int64(len(text)) + sparseReader, err := filepool.NewSimpleSparseReaderAt(strings.NewReader(text), nil, underlyingSize) + require.NoError(t, err) + + t.Run("ReadWithinBounds", func(t *testing.T) { + r := filepool.NewTruncatableSparseReaderAt(sparseReader, underlyingSize) + + buf := make([]byte, 5) + n, err := r.ReadAt(buf, 0) + require.NoError(t, err) + require.Equal(t, 5, n) + require.Equal(t, []byte("Hello"), buf[:n]) + }) + + t.Run("ReadAcrossBoundary", func(t *testing.T) { + r := filepool.NewTruncatableSparseReaderAt(sparseReader, underlyingSize) + + buf := make([]byte, 7) + n, err := r.ReadAt(buf, 6) + require.Equal(t, err, io.EOF) + require.Equal(t, 6, n) + require.Equal(t, []byte("World!\x00"), buf) + }) + + t.Run("TruncateHidesData", func(t *testing.T) { + r := filepool.NewTruncatableSparseReaderAt(sparseReader, underlyingSize) + + require.NoError(t, r.Truncate(5)) + buf := make([]byte, 6) + n, err := r.ReadAt(buf, 0) + require.Equal(t, err, io.EOF) + require.Equal(t, 5, n) + require.Equal(t, []byte("Hello\x00"), buf) + }) + + t.Run("ShrinkThenGrow", func(t *testing.T) { + r := filepool.NewTruncatableSparseReaderAt(sparseReader, underlyingSize) + + require.NoError(t, r.Truncate(2)) + buf := make([]byte, 3) + n, err := r.ReadAt(buf, 0) + require.Equal(t, err, io.EOF) + require.Equal(t, 2, n) + require.Equal(t, []byte("He\x00"), buf) + + require.NoError(t, r.Truncate(5)) + n, err = r.ReadAt(buf, 0) + require.NoError(t, err) + require.Equal(t, 3, n) + require.Equal(t, []byte("He\x00"), buf) + }) + + t.Run("EdgeCaseEmptyUnderlying", func(t *testing.T) { + sparseReader, err := filepool.NewSimpleSparseReaderAt(strings.NewReader(""), nil, 0) + r := 
filepool.NewTruncatableSparseReaderAt(sparseReader, 0) + require.NoError(t, err) + buf := make([]byte, 5) + n, err := r.ReadAt(buf, 0) + require.Equal(t, io.EOF, err) + require.Equal(t, 0, n) + require.NoError(t, r.Truncate(5)) + n, err = r.ReadAt(buf, 0) + require.Equal(t, io.EOF, err) + require.Equal(t, 5, n) + require.Equal(t, []byte("\x00\x00\x00\x00\x00"), buf) + }) +} + +func TestTruncatableSparseReaderAt_GetNextRegionOffset(t *testing.T) { + text := "Hell\x00\x00World!" + underlyingSize := int64(len(text)) + sparseReader, err := filepool.NewSimpleSparseReaderAt(strings.NewReader(text), []filepool.Range{{Off: 4, Len: 2}}, underlyingSize) + require.NoError(t, err) + + t.Run("Untruncated", func(t *testing.T) { + r := filepool.NewTruncatableSparseReaderAt(sparseReader, underlyingSize) + var err error + var nextOffset int64 + // Holes should be at [4,5] and special eof hole at 12. + for i := int64(0); i < 4; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, nextOffset, int64(4)) + } + for i := int64(4); i < 6; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, nextOffset, i) + } + for i := int64(6); i < underlyingSize; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, nextOffset, underlyingSize) + } + nextOffset, err = r.GetNextRegionOffset(underlyingSize, filesystem.Hole) + require.Equal(t, err, io.EOF) + // Data should be in [0,3] and [6,11] + for i := int64(0); i < 4; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Data) + require.NoError(t, err) + require.Equal(t, nextOffset, i) + } + for i := int64(4); i < 6; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Data) + require.NoError(t, err) + require.Equal(t, nextOffset, int64(6)) + } + for i := int64(6); i < underlyingSize; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Data) + 
require.NoError(t, err) + require.Equal(t, nextOffset, i) + } + nextOffset, err = r.GetNextRegionOffset(underlyingSize, filesystem.Data) + require.Equal(t, err, io.EOF) + }) + + t.Run("TruncateToHole", func(t *testing.T) { + r := filepool.NewTruncatableSparseReaderAt(sparseReader, underlyingSize) + var err error + var nextOffset int64 + err = r.Truncate(6) + require.NoError(t, err) + // Holes should be at [4,5]. + for i := int64(0); i < 4; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, nextOffset, int64(4)) + } + for i := int64(4); i < 6; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, nextOffset, i) + } + nextOffset, err = r.GetNextRegionOffset(6, filesystem.Hole) + require.Equal(t, err, io.EOF) + // Data should be at [0,3]. + for i := int64(0); i < 4; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Data) + require.NoError(t, err) + require.Equal(t, nextOffset, i) + } + for i := int64(4); i < 6; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Data) + require.Equal(t, io.EOF, err) + } + nextOffset, err = r.GetNextRegionOffset(6, filesystem.Data) + require.Equal(t, io.EOF, err) + }) + + t.Run("TruncateThenGrow", func(t *testing.T) { + r := filepool.NewTruncatableSparseReaderAt(sparseReader, underlyingSize) + var err error + var nextOffset int64 + // Truncate down to 4, then grow back up to 12. Expect [4,11] to + // be holes. + err = r.Truncate(4) + require.NoError(t, err) + err = r.Truncate(12) + require.NoError(t, err) + // Holes should be in the region [4,11]. 
+ for i := int64(0); i < 4; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, nextOffset, int64(4)) + } + for i := int64(4); i < 12; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Hole) + require.NoError(t, err) + require.Equal(t, nextOffset, i) + } + nextOffset, err = r.GetNextRegionOffset(12, filesystem.Hole) + require.Equal(t, err, io.EOF) + // Data should be in the region [0,3]. + for i := int64(0); i < 4; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Data) + require.NoError(t, err) + require.Equal(t, nextOffset, i) + } + for i := int64(4); i < 12; i++ { + nextOffset, err = r.GetNextRegionOffset(i, filesystem.Data) + require.Equal(t, err, io.EOF) + } + nextOffset, err = r.GetNextRegionOffset(12, filesystem.Hole) + require.Equal(t, err, io.EOF) + }) +} diff --git a/pkg/filesystem/in_memory_file_pool.go b/pkg/filesystem/in_memory_file_pool.go deleted file mode 100644 index 04f0c528..00000000 --- a/pkg/filesystem/in_memory_file_pool.go +++ /dev/null @@ -1,81 +0,0 @@ -package filesystem - -import ( - "io" - - "github.com/buildbarn/bb-storage/pkg/filesystem" -) - -type inMemoryFilePool struct{} - -func (fp inMemoryFilePool) NewFile() (filesystem.FileReadWriter, error) { - return &inMemoryFile{}, nil -} - -type inMemoryFile struct { - data []byte -} - -func (f *inMemoryFile) Close() error { - f.data = nil - return nil -} - -func (f *inMemoryFile) GetNextRegionOffset(off int64, regionType filesystem.RegionType) (int64, error) { - // Files are stored in a byte slice contiguously, so there is no - // sparseness. 
- if off >= int64(len(f.data)) { - return 0, io.EOF - } - switch regionType { - case filesystem.Data: - return off, nil - case filesystem.Hole: - return int64(len(f.data)), nil - default: - panic("Unknown region type") - } -} - -func (f *inMemoryFile) ReadAt(p []byte, off int64) (int, error) { - if int(off) >= len(f.data) { - return 0, io.EOF - } - if n := copy(p, f.data[off:]); n < len(p) { - return n, io.EOF - } - return len(p), nil -} - -func (f *inMemoryFile) Sync() error { - // Because FilePool does not provide any persistency, there is - // no need to synchronize any data. - return nil -} - -func (f *inMemoryFile) Truncate(size int64) error { - if len(f.data) >= int(size) { - // Truncate the file. - f.data = f.data[:size] - } else { - // Grow the file. - f.data = append(f.data, make([]byte, int(size)-len(f.data))...) - } - return nil -} - -func (f *inMemoryFile) WriteAt(p []byte, off int64) (int, error) { - // Zero-sized writes should not cause the file to grow. - if len(p) == 0 { - return 0, nil - } - - if size := int(off) + len(p); len(f.data) < size { - // Grow the file. - f.data = append(f.data, make([]byte, size-len(f.data))...) - } - return copy(f.data[off:], p), nil -} - -// InMemoryFilePool is a FilePool that stores all data in memory. 
-var InMemoryFilePool FilePool = inMemoryFilePool{} diff --git a/pkg/filesystem/in_memory_file_pool_test.go b/pkg/filesystem/in_memory_file_pool_test.go deleted file mode 100644 index 079b4c91..00000000 --- a/pkg/filesystem/in_memory_file_pool_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package filesystem_test - -import ( - "io" - "testing" - - "github.com/buildbarn/bb-remote-execution/pkg/filesystem" - "github.com/stretchr/testify/require" -) - -func TestInMemoryFilePool(t *testing.T) { - fp := filesystem.InMemoryFilePool - - t.Run("EmptyFile", func(t *testing.T) { - f, err := fp.NewFile() - require.NoError(t, err) - - var p [10]byte - n, err := f.ReadAt(p[:], 0) - require.Equal(t, 0, n) - require.Equal(t, io.EOF, err) - - require.NoError(t, f.Close()) - }) - - t.Run("NonEmptyFile", func(t *testing.T) { - f, err := fp.NewFile() - require.NoError(t, err) - - // Write a piece of text into the file. - n, err := f.WriteAt([]byte("Hello, world"), 123) - require.Equal(t, 12, n) - require.NoError(t, err) - - // Truncate a part of it. - require.NoError(t, f.Truncate(128)) - - // Read back the end of the file. - var p [10]byte - n, err = f.ReadAt(p[:], 120) - require.Equal(t, 8, n) - require.Equal(t, io.EOF, err) - require.Equal(t, []byte("\x00\x00\x00Hello"), p[:8]) - - require.NoError(t, f.Close()) - }) - - t.Run("ZeroSizedWrite", func(t *testing.T) { - f, err := fp.NewFile() - require.NoError(t, err) - - // A zero-sized write should not cause the file to - // actually grow. The read should still return EOF. 
- n, err := f.WriteAt(nil, 123) - require.Equal(t, 0, n) - require.NoError(t, err) - - var p [10]byte - n, err = f.ReadAt(p[:], 0) - require.Equal(t, 0, n) - require.Equal(t, io.EOF, err) - - require.NoError(t, f.Close()) - }) -} diff --git a/pkg/filesystem/virtual/BUILD.bazel b/pkg/filesystem/virtual/BUILD.bazel index 25a53d84..72aa05fb 100644 --- a/pkg/filesystem/virtual/BUILD.bazel +++ b/pkg/filesystem/virtual/BUILD.bazel @@ -44,8 +44,8 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/cas", - "//pkg/filesystem", "//pkg/filesystem/access", + "//pkg/filesystem/filepool", "//pkg/proto/bazeloutputservice", "//pkg/proto/bazeloutputservice/rev2", "//pkg/proto/outputpathpersistency", diff --git a/pkg/filesystem/virtual/pool_backed_file_allocator.go b/pkg/filesystem/virtual/pool_backed_file_allocator.go index 1e2c5d00..50c1cf77 100644 --- a/pkg/filesystem/virtual/pool_backed_file_allocator.go +++ b/pkg/filesystem/virtual/pool_backed_file_allocator.go @@ -9,7 +9,7 @@ import ( "time" remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" - re_filesystem "github.com/buildbarn/bb-remote-execution/pkg/filesystem" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/filepool" "github.com/buildbarn/bb-remote-execution/pkg/proto/bazeloutputservice" bazeloutputservicerev2 "github.com/buildbarn/bb-remote-execution/pkg/proto/bazeloutputservice/rev2" "github.com/buildbarn/bb-storage/pkg/blobstore" @@ -45,7 +45,7 @@ var ( ) type poolBackedFileAllocator struct { - pool re_filesystem.FilePool + pool filepool.FilePool errorLogger util.ErrorLogger } @@ -58,7 +58,7 @@ type poolBackedFileAllocator struct { // file descriptor count reach zero), Close() is called on the // underlying backing file descriptor. This may be used to request // deletion from underlying storage. 
-func NewPoolBackedFileAllocator(pool re_filesystem.FilePool, errorLogger util.ErrorLogger) FileAllocator { +func NewPoolBackedFileAllocator(pool filepool.FilePool, errorLogger util.ErrorLogger) FileAllocator { poolBackedFileAllocatorPrometheusMetrics.Do(func() { prometheus.MustRegister(poolBackedFileAllocatorWritableFileUploadDelaySeconds) prometheus.MustRegister(poolBackedFileAllocatorWritableFileUploadDelayTimeouts) @@ -71,7 +71,7 @@ func NewPoolBackedFileAllocator(pool re_filesystem.FilePool, errorLogger util.Er } func (fa *poolBackedFileAllocator) NewFile(isExecutable bool, size uint64, shareAccess ShareMask) (LinkableLeaf, Status) { - file, err := fa.pool.NewFile() + file, err := fa.pool.NewFile(nil, 0) if err != nil { fa.errorLogger.Log(util.StatusWrapf(err, "Failed to create new file")) return nil, StatusErrIO diff --git a/pkg/filesystem/virtual/pool_backed_file_allocator_test.go b/pkg/filesystem/virtual/pool_backed_file_allocator_test.go index a14fbef3..ba559e29 100644 --- a/pkg/filesystem/virtual/pool_backed_file_allocator_test.go +++ b/pkg/filesystem/virtual/pool_backed_file_allocator_test.go @@ -32,7 +32,7 @@ func TestPoolBackedFileAllocatorGetBazelOutputServiceStat(t *testing.T) { // Create a file and initialize it with some contents. pool := mock.NewMockFilePool(ctrl) underlyingFile := mock.NewMockFileReadWriter(ctrl) - pool.EXPECT().NewFile().Return(underlyingFile, nil) + pool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFile, nil) errorLogger := mock.NewMockErrorLogger(ctrl) f, s := virtual.NewPoolBackedFileAllocator(pool, errorLogger). @@ -201,7 +201,7 @@ func TestPoolBackedFileAllocatorVirtualSeek(t *testing.T) { pool := mock.NewMockFilePool(ctrl) underlyingFile := mock.NewMockFileReadWriter(ctrl) - pool.EXPECT().NewFile().Return(underlyingFile, nil) + pool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFile, nil) errorLogger := mock.NewMockErrorLogger(ctrl) f, s := virtual.NewPoolBackedFileAllocator(pool, errorLogger). 
@@ -268,7 +268,7 @@ func TestPoolBackedFileAllocatorVirtualOpenSelfStaleAfterUnlink(t *testing.T) { pool := mock.NewMockFilePool(ctrl) underlyingFile := mock.NewMockFileReadWriter(ctrl) - pool.EXPECT().NewFile().Return(underlyingFile, nil) + pool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFile, nil) underlyingFile.EXPECT().Close() errorLogger := mock.NewMockErrorLogger(ctrl) @@ -293,7 +293,7 @@ func TestPoolBackedFileAllocatorVirtualOpenSelfStaleAfterClose(t *testing.T) { pool := mock.NewMockFilePool(ctrl) underlyingFile := mock.NewMockFileReadWriter(ctrl) - pool.EXPECT().NewFile().Return(underlyingFile, nil) + pool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFile, nil) underlyingFile.EXPECT().Close() errorLogger := mock.NewMockErrorLogger(ctrl) @@ -315,7 +315,7 @@ func TestPoolBackedFileAllocatorVirtualRead(t *testing.T) { pool := mock.NewMockFilePool(ctrl) underlyingFile := mock.NewMockFileReadWriter(ctrl) - pool.EXPECT().NewFile().Return(underlyingFile, nil) + pool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFile, nil) errorLogger := mock.NewMockErrorLogger(ctrl) f, s := virtual.NewPoolBackedFileAllocator(pool, errorLogger). 
@@ -390,7 +390,7 @@ func TestPoolBackedFileAllocatorFUSETruncateFailure(t *testing.T) { pool := mock.NewMockFilePool(ctrl) underlyingFile := mock.NewMockFileReadWriter(ctrl) - pool.EXPECT().NewFile().Return(underlyingFile, nil) + pool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFile, nil) underlyingFile.EXPECT().Truncate(int64(42)).Return(status.Error(codes.Unavailable, "Storage backends offline")) underlyingFile.EXPECT().Close() @@ -417,7 +417,7 @@ func TestPoolBackedFileAllocatorVirtualWriteFailure(t *testing.T) { pool := mock.NewMockFilePool(ctrl) underlyingFile := mock.NewMockFileReadWriter(ctrl) - pool.EXPECT().NewFile().Return(underlyingFile, nil) + pool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFile, nil) var p [10]byte underlyingFile.EXPECT().WriteAt(p[:], int64(42)).Return(0, status.Error(codes.Unavailable, "Storage backends offline")) underlyingFile.EXPECT().Close() @@ -440,7 +440,7 @@ func TestPoolBackedFileAllocatorUploadFile(t *testing.T) { // Create a file backed by a FilePool. pool := mock.NewMockFilePool(ctrl) underlyingFile := mock.NewMockFileReadWriter(ctrl) - pool.EXPECT().NewFile().Return(underlyingFile, nil) + pool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFile, nil) errorLogger := mock.NewMockErrorLogger(ctrl) f, s := virtual.NewPoolBackedFileAllocator(pool, errorLogger). @@ -610,7 +610,7 @@ func TestPoolBackedFileAllocatorVirtualClose(t *testing.T) { // Create a new file. pool := mock.NewMockFilePool(ctrl) underlyingFile := mock.NewMockFileReadWriter(ctrl) - pool.EXPECT().NewFile().Return(underlyingFile, nil) + pool.EXPECT().NewFile(nil, uint64(0)).Return(underlyingFile, nil) errorLogger := mock.NewMockErrorLogger(ctrl) f, s := virtual.NewPoolBackedFileAllocator(pool, errorLogger). 
diff --git a/pkg/proto/configuration/filesystem/BUILD.bazel b/pkg/proto/configuration/filesystem/BUILD.bazel index eda00b89..69e6725f 100644 --- a/pkg/proto/configuration/filesystem/BUILD.bazel +++ b/pkg/proto/configuration/filesystem/BUILD.bazel @@ -6,10 +6,7 @@ proto_library( name = "filesystem_proto", srcs = ["filesystem.proto"], visibility = ["//visibility:public"], - deps = [ - "@com_github_buildbarn_bb_storage//pkg/proto/configuration/blockdevice:blockdevice_proto", - "@protobuf//:empty_proto", - ], + deps = ["@com_github_buildbarn_bb_storage//pkg/proto/configuration/blockdevice:blockdevice_proto"], ) go_proto_library( diff --git a/pkg/proto/configuration/filesystem/filesystem.pb.go b/pkg/proto/configuration/filesystem/filesystem.pb.go index 70770b9c..6bd6aded 100644 --- a/pkg/proto/configuration/filesystem/filesystem.pb.go +++ b/pkg/proto/configuration/filesystem/filesystem.pb.go @@ -10,7 +10,6 @@ import ( blockdevice "github.com/buildbarn/bb-storage/pkg/proto/configuration/blockdevice" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" reflect "reflect" sync "sync" unsafe "unsafe" @@ -27,8 +26,6 @@ type FilePoolConfiguration struct { state protoimpl.MessageState `protogen:"open.v1"` // Types that are valid to be assigned to Backend: // - // *FilePoolConfiguration_InMemory - // *FilePoolConfiguration_DirectoryPath // *FilePoolConfiguration_BlockDevice Backend isFilePoolConfiguration_Backend `protobuf_oneof:"backend"` unknownFields protoimpl.UnknownFields @@ -72,24 +69,6 @@ func (x *FilePoolConfiguration) GetBackend() isFilePoolConfiguration_Backend { return nil } -func (x *FilePoolConfiguration) GetInMemory() *emptypb.Empty { - if x != nil { - if x, ok := x.Backend.(*FilePoolConfiguration_InMemory); ok { - return x.InMemory - } - } - return nil -} - -func (x *FilePoolConfiguration) GetDirectoryPath() string { - if x != nil { - if x, 
ok := x.Backend.(*FilePoolConfiguration_DirectoryPath); ok { - return x.DirectoryPath - } - } - return "" -} - func (x *FilePoolConfiguration) GetBlockDevice() *blockdevice.Configuration { if x != nil { if x, ok := x.Backend.(*FilePoolConfiguration_BlockDevice); ok { @@ -103,34 +82,20 @@ type isFilePoolConfiguration_Backend interface { isFilePoolConfiguration_Backend() } -type FilePoolConfiguration_InMemory struct { - InMemory *emptypb.Empty `protobuf:"bytes,1,opt,name=in_memory,json=inMemory,proto3,oneof"` -} - -type FilePoolConfiguration_DirectoryPath struct { - DirectoryPath string `protobuf:"bytes,2,opt,name=directory_path,json=directoryPath,proto3,oneof"` -} - type FilePoolConfiguration_BlockDevice struct { BlockDevice *blockdevice.Configuration `protobuf:"bytes,3,opt,name=block_device,json=blockDevice,proto3,oneof"` } -func (*FilePoolConfiguration_InMemory) isFilePoolConfiguration_Backend() {} - -func (*FilePoolConfiguration_DirectoryPath) isFilePoolConfiguration_Backend() {} - func (*FilePoolConfiguration_BlockDevice) isFilePoolConfiguration_Backend() {} var File_pkg_proto_configuration_filesystem_filesystem_proto protoreflect.FileDescriptor const file_pkg_proto_configuration_filesystem_filesystem_proto_rawDesc = "" + "\n" + - "3pkg/proto/configuration/filesystem/filesystem.proto\x12\"buildbarn.configuration.filesystem\x1a\x1bgoogle/protobuf/empty.proto\x1a5pkg/proto/configuration/blockdevice/blockdevice.proto\"\xdb\x01\n" + - "\x15FilePoolConfiguration\x125\n" + - "\tin_memory\x18\x01 \x01(\v2\x16.google.protobuf.EmptyH\x00R\binMemory\x12'\n" + - "\x0edirectory_path\x18\x02 \x01(\tH\x00R\rdirectoryPath\x12W\n" + + "3pkg/proto/configuration/filesystem/filesystem.proto\x12\"buildbarn.configuration.filesystem\x1a5pkg/proto/configuration/blockdevice/blockdevice.proto\"\x87\x01\n" + + "\x15FilePoolConfiguration\x12W\n" + "\fblock_device\x18\x03 \x01(\v22.buildbarn.configuration.blockdevice.ConfigurationH\x00R\vblockDeviceB\t\n" + - 
"\abackendBMZKgithub.com/buildbarn/bb-remote-execution/pkg/proto/configuration/filesystemb\x06proto3" + "\abackendJ\x04\b\x01\x10\x02J\x04\b\x02\x10\x03BMZKgithub.com/buildbarn/bb-remote-execution/pkg/proto/configuration/filesystemb\x06proto3" var ( file_pkg_proto_configuration_filesystem_filesystem_proto_rawDescOnce sync.Once @@ -147,17 +112,15 @@ func file_pkg_proto_configuration_filesystem_filesystem_proto_rawDescGZIP() []by var file_pkg_proto_configuration_filesystem_filesystem_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_pkg_proto_configuration_filesystem_filesystem_proto_goTypes = []any{ (*FilePoolConfiguration)(nil), // 0: buildbarn.configuration.filesystem.FilePoolConfiguration - (*emptypb.Empty)(nil), // 1: google.protobuf.Empty - (*blockdevice.Configuration)(nil), // 2: buildbarn.configuration.blockdevice.Configuration + (*blockdevice.Configuration)(nil), // 1: buildbarn.configuration.blockdevice.Configuration } var file_pkg_proto_configuration_filesystem_filesystem_proto_depIdxs = []int32{ - 1, // 0: buildbarn.configuration.filesystem.FilePoolConfiguration.in_memory:type_name -> google.protobuf.Empty - 2, // 1: buildbarn.configuration.filesystem.FilePoolConfiguration.block_device:type_name -> buildbarn.configuration.blockdevice.Configuration - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name + 1, // 0: buildbarn.configuration.filesystem.FilePoolConfiguration.block_device:type_name -> buildbarn.configuration.blockdevice.Configuration + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } func init() { 
file_pkg_proto_configuration_filesystem_filesystem_proto_init() } @@ -166,8 +129,6 @@ func file_pkg_proto_configuration_filesystem_filesystem_proto_init() { return } file_pkg_proto_configuration_filesystem_filesystem_proto_msgTypes[0].OneofWrappers = []any{ - (*FilePoolConfiguration_InMemory)(nil), - (*FilePoolConfiguration_DirectoryPath)(nil), (*FilePoolConfiguration_BlockDevice)(nil), } type x struct{} diff --git a/pkg/proto/configuration/filesystem/filesystem.proto b/pkg/proto/configuration/filesystem/filesystem.proto index e16c4534..3841970b 100644 --- a/pkg/proto/configuration/filesystem/filesystem.proto +++ b/pkg/proto/configuration/filesystem/filesystem.proto @@ -2,20 +2,23 @@ syntax = "proto3"; package buildbarn.configuration.filesystem; -import "google/protobuf/empty.proto"; import "pkg/proto/configuration/blockdevice/blockdevice.proto"; option go_package = "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/filesystem"; message FilePoolConfiguration { - oneof backend { - // Store all temporary files in memory. - google.protobuf.Empty in_memory = 1; + // Was 'in_memory'. This filepool configuration did not have support + // for sparse files and performed poorly when memory constrained. It + // has been removed. + reserved 1; - // Store all temporary files in a single directory on a file system. - // This option denotes the path of this directory. - string directory_path = 2; + // Was 'directory_path'. Write operations were implemented by opening + // the file, writing to it, and closing it. Reading was implemented + // similarly. This led to significant performance overhead. Use + // 'block_device' backed by a single file instead. + reserved 2; + oneof backend { // Store all temporary files in a single file on a file system or on // a raw block device. buildbarn.configuration.blockdevice.Configuration block_device = 3;