diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index eeba0640dc6..de82817c70e 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1 +1 @@ -custom: [ipshipyard.gitwallet.co] +github: [ipshipyard] diff --git a/.github/workflows/docker-check.yml b/.github/workflows/docker-check.yml index 88415505059..997a0fb7b8d 100644 --- a/.github/workflows/docker-check.yml +++ b/.github/workflows/docker-check.yml @@ -30,6 +30,20 @@ jobs: verbose: true format: tty + # Guard rail: the Dockerfile ARG default is what `docker build .` uses + # locally without --build-arg. CI overrides it from go.mod, but the + # default must stay in sync so local builds and the published image use + # the same Go toolchain. + - name: Verify Dockerfile GO_VERSION default matches go.mod + run: | + GO_MOD_VERSION=$(awk '/^go [0-9]/ {print $2; exit}' go.mod) + DOCKERFILE_VERSION=$(awk -F= '/^ARG GO_VERSION=/ {print $2; exit}' Dockerfile) + if [ "$GO_MOD_VERSION" != "$DOCKERFILE_VERSION" ]; then + echo "::error file=Dockerfile::go.mod has 'go ${GO_MOD_VERSION}' but Dockerfile default ARG GO_VERSION=${DOCKERFILE_VERSION}. Update the Dockerfile default to match go.mod." + exit 1 + fi + echo "OK: both pinned to ${GO_MOD_VERSION}" + build: if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch' runs-on: ubuntu-latest @@ -42,17 +56,25 @@ jobs: shell: bash steps: - uses: actions/checkout@v6 - + - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - + uses: docker/setup-buildx-action@v4 + + # Mirror the publish workflow: pull the Go version from go.mod so the + # PR check builds with the same toolchain that the published image uses. + - name: Read Go version from go.mod + id: go + run: echo "version=$(awk '/^go [0-9]/ {print $2; exit}' go.mod)" >> "$GITHUB_OUTPUT" + - name: Build Docker image with BuildKit - uses: docker/build-push-action@v6 + uses: docker/build-push-action@v7 with: context: . 
push: false load: true tags: ${{ env.IMAGE_NAME }}:${{ env.WIP_IMAGE_TAG }} + build-args: | + GO_VERSION=${{ steps.go.outputs.version }} cache-from: | type=gha type=registry,ref=${{ env.IMAGE_NAME }}:buildcache diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 39eaf52f443..a36a64f9f84 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -46,13 +46,13 @@ jobs: uses: actions/checkout@v6 - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@v4 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@v4 - name: Log in to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@v4 with: username: ${{ vars.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} @@ -66,11 +66,17 @@ jobs: echo "EOF" >> $GITHUB_OUTPUT shell: bash + # Read the Go version from go.mod so the Docker image is built with the + # exact same toolchain that setup-go installs in the rest of CI. + - name: Read Go version from go.mod + id: go + run: echo "version=$(awk '/^go [0-9]/ {print $2; exit}' go.mod)" >> "$GITHUB_OUTPUT" + # We have to build each platform separately because when using multi-arch # builds, only one platform is being loaded into the cache. This would # prevent us from testing the other platforms. - name: Build Docker image (linux/amd64) - uses: docker/build-push-action@v6 + uses: docker/build-push-action@v7 with: platforms: linux/amd64 context: . @@ -78,13 +84,15 @@ jobs: load: true file: ./Dockerfile tags: ${{ env.IMAGE_NAME }}:linux-amd64 + build-args: | + GO_VERSION=${{ steps.go.outputs.version }} cache-from: | type=gha type=registry,ref=${{ env.IMAGE_NAME }}:buildcache cache-to: type=gha,mode=max - name: Build Docker image (linux/arm/v7) - uses: docker/build-push-action@v6 + uses: docker/build-push-action@v7 with: platforms: linux/arm/v7 context: . 
@@ -92,13 +100,15 @@ jobs: load: true file: ./Dockerfile tags: ${{ env.IMAGE_NAME }}:linux-arm-v7 + build-args: | + GO_VERSION=${{ steps.go.outputs.version }} cache-from: | type=gha type=registry,ref=${{ env.IMAGE_NAME }}:buildcache cache-to: type=gha,mode=max - name: Build Docker image (linux/arm64/v8) - uses: docker/build-push-action@v6 + uses: docker/build-push-action@v7 with: platforms: linux/arm64/v8 context: . @@ -106,6 +116,8 @@ jobs: load: true file: ./Dockerfile tags: ${{ env.IMAGE_NAME }}:linux-arm64-v8 + build-args: | + GO_VERSION=${{ steps.go.outputs.version }} cache-from: | type=gha type=registry,ref=${{ env.IMAGE_NAME }}:buildcache @@ -128,13 +140,15 @@ jobs: # This will only push the previously built images. - if: github.event_name != 'workflow_dispatch' || github.event.inputs.push == 'true' name: Publish to Docker Hub - uses: docker/build-push-action@v6 + uses: docker/build-push-action@v7 with: platforms: linux/amd64,linux/arm/v7,linux/arm64/v8 context: . push: true file: ./Dockerfile tags: "${{ github.event.inputs.tags || steps.tags.outputs.value }}" + build-args: | + GO_VERSION=${{ steps.go.outputs.version }} cache-from: | type=gha type=registry,ref=${{ env.IMAGE_NAME }}:buildcache diff --git a/.github/workflows/gateway-conformance.yml b/.github/workflows/gateway-conformance.yml index 8efc41424db..20ec4c7ffda 100644 --- a/.github/workflows/gateway-conformance.yml +++ b/.github/workflows/gateway-conformance.yml @@ -41,7 +41,7 @@ jobs: steps: # 1. Download the gateway-conformance fixtures - name: Download gateway-conformance fixtures - uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.10 + uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.13 with: output: fixtures @@ -93,7 +93,7 @@ jobs: # 6. 
Run the gateway-conformance tests - name: Run gateway-conformance tests - uses: ipfs/gateway-conformance/.github/actions/test@v0.10 + uses: ipfs/gateway-conformance/.github/actions/test@v0.13 with: gateway-url: http://127.0.0.1:8080 subdomain-url: http://localhost:8080 @@ -109,13 +109,13 @@ jobs: run: cat output.md >> $GITHUB_STEP_SUMMARY - name: Upload HTML report if: failure() || success() - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: gateway-conformance.html path: output.html - name: Upload JSON report if: failure() || success() - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: gateway-conformance.json path: output.json @@ -127,7 +127,7 @@ jobs: steps: # 1. Download the gateway-conformance fixtures - name: Download gateway-conformance fixtures - uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.10 + uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.13 with: output: fixtures @@ -199,7 +199,7 @@ jobs: # 9. 
Run the gateway-conformance tests over libp2p - name: Run gateway-conformance tests over libp2p - uses: ipfs/gateway-conformance/.github/actions/test@v0.10 + uses: ipfs/gateway-conformance/.github/actions/test@v0.13 with: gateway-url: http://127.0.0.1:8092 args: --specs "trustless-gateway,-trustless-ipns-gateway" -skip 'TestGatewayCar/GET_response_for_application/vnd.ipld.car/Header_Content-Length' @@ -214,13 +214,13 @@ jobs: run: cat output.md >> $GITHUB_STEP_SUMMARY - name: Upload HTML report if: failure() || success() - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: gateway-conformance-libp2p.html path: output.html - name: Upload JSON report if: failure() || success() - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: gateway-conformance-libp2p.json path: output.json diff --git a/.github/workflows/gotest.yml b/.github/workflows/gotest.yml index 8165eb12a3c..f40d2172e6b 100644 --- a/.github/workflows/gotest.yml +++ b/.github/workflows/gotest.yml @@ -43,7 +43,7 @@ jobs: make test_unit && [[ ! 
$(jq -s -c 'map(select(.Action == "fail")) | .[]' test/unit/gotest.json) ]] - name: Upload coverage to Codecov - uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2 + uses: codecov/codecov-action@57e3a136b779b570ffcdbf80b3bdc90e7fab3de2 # v6.0.0 if: failure() || success() with: name: unittests @@ -57,7 +57,7 @@ jobs: output: test/unit/gotest.junit.xml if: failure() || success() - name: Archive the JUnit XML report - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: unit-tests-junit path: test/unit/gotest.junit.xml @@ -70,7 +70,7 @@ jobs: output: test/unit/gotest.html if: failure() || success() - name: Archive the HTML report - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: unit-tests-html path: test/unit/gotest.html @@ -120,7 +120,7 @@ jobs: output: test/cli/cli-tests.junit.xml if: failure() || success() - name: Archive JUnit XML report - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: cli-tests-junit path: test/cli/cli-tests.junit.xml @@ -133,7 +133,7 @@ jobs: output: test/cli/cli-tests.html if: failure() || success() - name: Archive HTML report - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: cli-tests-html path: test/cli/cli-tests.html @@ -149,6 +149,59 @@ jobs: run: cat test/cli/cli-tests.md >> $GITHUB_STEP_SUMMARY if: failure() || success() + # FUSE filesystem tests (require /dev/fuse and fusermount) + # Runs both FUSE unit tests (./fuse/...) and CLI integration tests (./test/cli/fuse/...) + fuse-tests: + if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch' + runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }} + concurrency: + group: fuse-tests-${{ github.repository }} + cancel-in-progress: false + # A normal run takes ~3min. 
6min gives roughly 2x and lets Go's 4min + # test timeout fire first (printing a stack trace) on a hang, instead + # of GitHub silently cancelling the job. + timeout-minutes: 6 + env: + # Dump all goroutines on a test panic, not just the panicking one, + # so we can see which test is actually hung. + GOTRACEBACK: all + TEST_FUSE: 1 + defaults: + run: + shell: bash + steps: + - name: Check out Kubo + uses: actions/checkout@v6 + - name: Set up Go + uses: actions/setup-go@v6 + with: + go-version-file: 'go.mod' + - name: Install FUSE + run: | + if ! command -v fusermount3 &>/dev/null && ! command -v fusermount &>/dev/null; then + sudo apt-get update + sudo apt-get install -y fuse3 + fi + - name: Clean up stale FUSE mounts + run: | + # On shared self-hosted runners, leftover mounts from previous + # runs can exhaust the kernel FUSE mount limit (mount_max). + # Unit tests mount with FsName "kubo-test"; CLI tests mount + # under the harness temp dir (ipfs/ipns/mfs subdirectories). + awk '$1 == "kubo-test" || $2 ~ /\/tmp\/.*\/(ipfs|ipns|mfs)$/ { print $2 }' /proc/mounts 2>/dev/null \ + | while read -r mp; do + fusermount3 -uz "$mp" 2>/dev/null || fusermount -uz "$mp" 2>/dev/null || true + done + - name: Run FUSE tests + run: make test_fuse + - name: Clean up FUSE mounts + if: always() + run: | + awk '$1 == "kubo-test" || $2 ~ /\/tmp\/.*\/(ipfs|ipns|mfs)$/ { print $2 }' /proc/mounts 2>/dev/null \ + | while read -r mp; do + fusermount3 -uz "$mp" 2>/dev/null || fusermount -uz "$mp" 2>/dev/null || true + done + # Example tests (kubo-as-a-library) example-tests: if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch' diff --git a/.github/workflows/interop.yml b/.github/workflows/interop.yml index 0ba22576466..0677f654d1b 100644 --- a/.github/workflows/interop.yml +++ b/.github/workflows/interop.yml @@ -51,7 +51,7 @@ jobs: with: go-version-file: 'go.mod' - run: make build - - uses: actions/upload-artifact@v6 + - uses: actions/upload-artifact@v7 with: 
name: kubo path: cmd/ipfs/ipfs @@ -66,7 +66,7 @@ jobs: - uses: actions/setup-node@v6 with: node-version: lts/* - - uses: actions/download-artifact@v7 + - uses: actions/download-artifact@v8 with: name: kubo path: cmd/ipfs @@ -122,7 +122,7 @@ jobs: run: shell: bash steps: - - uses: actions/download-artifact@v7 + - uses: actions/download-artifact@v8 with: name: kubo path: cmd/ipfs @@ -196,7 +196,7 @@ jobs: working-directory: ipfs-webui - name: Upload test artifacts on failure if: failure() - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: webui-test-results path: ipfs-webui/test-results/ diff --git a/.github/workflows/sharness.yml b/.github/workflows/sharness.yml index ac32bf3a43e..357c6a1027b 100644 --- a/.github/workflows/sharness.yml +++ b/.github/workflows/sharness.yml @@ -55,7 +55,7 @@ jobs: # increasing parallelism beyond 10 doesn't speed up the tests much PARALLEL: ${{ github.repository == 'ipfs/kubo' && 10 || 3 }} - name: Upload coverage report - uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2 + uses: codecov/codecov-action@57e3a136b779b570ffcdbf80b3bdc90e7fab3de2 # v6.0.0 if: failure() || success() with: name: sharness @@ -90,7 +90,7 @@ jobs: destination: sharness.html - name: Upload one-page HTML report if: github.repository != 'ipfs/kubo' && (failure() || success()) - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: sharness.html path: kubo/test/sharness/test-results/sharness.html @@ -110,7 +110,7 @@ jobs: destination: sharness-html/ - name: Upload full HTML report if: github.repository != 'ipfs/kubo' && (failure() || success()) - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: sharness-html path: kubo/test/sharness/test-results/sharness-html diff --git a/.github/workflows/test-migrations.yml b/.github/workflows/test-migrations.yml index 35fcbe729a8..64d5f7eed01 100644 --- a/.github/workflows/test-migrations.yml +++ 
b/.github/workflows/test-migrations.yml @@ -1,4 +1,4 @@ -name: Migrations +name: Migrations & Update on: workflow_dispatch: @@ -9,6 +9,9 @@ on: - 'test/cli/migrations/**' # Config and repo handling - 'repo/fsrepo/**' + # Update command + - 'core/commands/update*.go' + - 'test/cli/update_test.go' # This workflow file itself - '.github/workflows/test-migrations.yml' push: @@ -19,6 +22,8 @@ on: - 'repo/fsrepo/migrations/**' - 'test/cli/migrations/**' - 'repo/fsrepo/**' + - 'core/commands/update*.go' + - 'test/cli/update_test.go' - '.github/workflows/test-migrations.yml' concurrency: @@ -75,11 +80,34 @@ jobs: ipfs version || echo "Failed to run ipfs version" go test ./test/cli/migrations/... + # GitHub's macOS runners occasionally lose DNS for api.github.com, + # which breaks the real-network subtests in TestUpdate (see run + # 24222365595). Point the resolver at Cloudflare and Google so the + # runner is insulated from flaky upstream DNS. + - name: Configure DNS (macOS) + if: runner.os == 'macOS' + run: | + networksetup -listallnetworkservices | tail -n +2 | while read -r svc; do + sudo networksetup -setdnsservers "$svc" 1.1.1.1 8.8.8.8 || true + done + sudo dscacheutil -flushcache + sudo killall -HUP mDNSResponder || true + scutil --dns | head -20 || true + + - name: Run CLI update tests + env: + IPFS_PATH: ${{ runner.temp }}/ipfs-update-test + GITHUB_TOKEN: ${{ github.token }} + run: | + export PATH="${{ github.workspace }}/cmd/ipfs:$PATH" + go test -run "TestUpdate" ./test/cli/... 
+ - name: Upload test results if: always() - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: ${{ matrix.os }}-test-results path: | test/**/*.log ${{ runner.temp }}/ipfs-test/ + ${{ runner.temp }}/ipfs-update-test/ diff --git a/.gitignore b/.gitignore index 890870a6ee5..09c29ed3d09 100644 --- a/.gitignore +++ b/.gitignore @@ -28,10 +28,12 @@ go-ipfs-source.tar.gz docs/examples/go-ipfs-as-a-library/example-folder/Qm* /test/sharness/t0054-dag-car-import-export-data/*.car -# test artifacts from make test_unit / test_cli +# test artifacts from make test_unit / test_cli / test_fuse /test/unit/gotest.json /test/unit/gotest.junit.xml /test/cli/cli-tests.json +/test/fuse/fuse-unit-tests.json +/test/fuse/fuse-cli-tests.json # ignore build output from snapcraft /ipfs_*.snap diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 00000000000..637c2dafb34 --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,232 @@ +# AI Agent Instructions for Kubo + +This file provides instructions for AI coding agents working on the [Kubo](https://github.com/ipfs/kubo) codebase (the Go implementation of IPFS). Follow the [Developer Guide](docs/developer-guide.md) for full details. + +## Quick Reference + +| Task | Command | +|-------------------|----------------------------------------------------------| +| Tidy deps | `make mod_tidy` (run first if `go.mod` changed) | +| Build | `make build` | +| Unit tests | `go test ./... -run TestName -v` | +| Integration tests | `make build && go test ./test/cli/... -run TestName -v` | +| Lint | `make -O test_go_lint` | +| Format | `go fmt ./...` | + +## Project Overview + +Kubo is the reference implementation of IPFS in Go. Most IPFS protocol logic lives in [boxo](https://github.com/ipfs/boxo) (the IPFS SDK); kubo wires it together and exposes it via CLI and HTTP RPC API. If a change belongs in the protocol layer, it likely belongs in boxo, not here. 
+ +Key directories: + +| Directory | Purpose | +|--------------------|----------------------------------------------------------| +| `cmd/ipfs/` | CLI entry point and binary | +| `core/` | core IPFS node implementation | +| `core/commands/` | CLI command definitions | +| `core/coreapi/` | Go API implementation | +| `client/rpc/` | HTTP RPC client | +| `plugin/` | plugin system | +| `repo/` | repository management | +| `test/cli/` | Go-based CLI integration tests (preferred for new tests) | +| `test/sharness/` | legacy shell-based integration tests | +| `docs/` | documentation | + +Other key external dependencies: [go-libp2p](https://github.com/libp2p/go-libp2p) (networking), [go-libp2p-kad-dht](https://github.com/libp2p/go-libp2p-kad-dht) (DHT). + +## Go Style + +Follow these Go style references: + +- [Go Code Review Comments](https://go.dev/wiki/CodeReviewComments) +- [Google Go Style Decisions](https://google.github.io/styleguide/go/decisions) + +Specific conventions for this project: + +- check the Go version in `go.mod` and use idiomatic features available at that version +- readability over micro-optimization: clear code is more important than saving microseconds +- prefer standard library functions and utilities over writing your own +- use early returns and indent the error flow, not the happy path +- use `slices.Contains`, `slices.DeleteFunc`, and the `maps` package instead of manual loops +- preallocate slices and maps when the size is known: `make([]T, 0, n)` +- use `map[K]struct{}` for sets, not `map[K]bool` +- receiver names: single-letter abbreviations matching the type (e.g., `s *Server`, `c *Client`) +- run `go fmt` after modifying Go source files, never indent manually + +### Error Handling + +- wrap errors with `fmt.Errorf("context: %w", err)`, never discard errors silently +- use `errors.Is` / `errors.As` for error checking, not string comparison +- never use `panic` in library code; only in `main` or test helpers +- return `nil` explicitly for 
the error value on success paths + +### Canonical Examples + +When adding or modifying code, follow the patterns established in these files: + +- CLI command structure: `core/commands/dag/dag.go` +- CLI integration test: `test/cli/dag_test.go` +- Test harness usage: `test/cli/harness/` package + +## Building + +Always run commands from the repository root. + +```bash +make mod_tidy # update go.mod/go.sum (use this instead of go mod tidy) +make build # build the ipfs binary to cmd/ipfs/ipfs +make install # install to $GOPATH/bin +make -O test_go_lint # run linter (use this instead of golangci-lint directly) +``` + +If you modify `go.mod` (add/remove/update dependencies), you must run `make mod_tidy` first, before building or testing. Use `make mod_tidy` instead of `go mod tidy` directly, as the project has multiple `go.mod` files. + +If you modify any `.go` files outside of `test/`, you must run `make build` before running integration tests. + +## Testing + +The full test suite is composed of several targets: + +| Make target | What it runs | +|----------------------|-----------------------------------------------------------------------| +| `make test` | all tests (`test_go_fmt` + `test_unit` + `test_cli` + `test_sharness`) | +| `make test_short` | fast subset (`test_go_fmt` + `test_unit`) | +| `make test_unit` | unit tests with coverage (excludes `test/cli`) | +| `make test_cli` | CLI integration tests (requires `make build` first) | +| `make test_fuse` | FUSE filesystem tests (requires `/dev/fuse` and `fusermount` in PATH) | +| `make test_sharness` | legacy shell-based integration tests | +| `make test_go_fmt` | checks Go source formatting | +| `make -O test_go_lint` | runs `golangci-lint` | + +During development, prefer running a specific test rather than the full suite: + +```bash +# run a single unit test +go test ./core/... -run TestSpecificUnit -v + +# run a single CLI integration test (requires make build first) +go test ./test/cli/... 
-run TestSpecificCLI -v +``` + +### Environment Setup for Integration Tests + +Before running `test_cli` or `test_sharness`, set these environment variables from the repo root: + +```bash +export PATH="$PWD/cmd/ipfs:$PATH" +export IPFS_PATH="$(mktemp -d)" +``` + +- `PATH`: integration tests use the `ipfs` binary from `PATH`, not Go source directly +- `IPFS_PATH`: isolates test data from `~/.ipfs` or other running nodes + +If you see "version (N) is lower than repos (M)", the `ipfs` binary in `PATH` is outdated. Rebuild with `make build` and verify `PATH`. + +### Running FUSE Tests + +FUSE tests require `/dev/fuse` and `fusermount` in `PATH`. On systems with only fuse3, create a symlink in a temp directory (never use `sudo` to install system-wide): + +```bash +FUSE_BIN="$(mktemp -d)" && ln -s /usr/bin/fusermount3 "$FUSE_BIN/fusermount" && PATH="$FUSE_BIN:$PATH" make test_fuse +``` + +Set `TEST_FUSE=1` to make mount failures fatal (CI does this). Without it, tests auto-detect and skip when FUSE is unavailable. + +### Running Sharness Tests + +Sharness tests are legacy shell-based tests. Run individual tests with a timeout: + +```bash +cd test/sharness && timeout 60s ./t0080-repo.sh +``` + +To investigate a failing test, pass `-v` for verbose output. In this mode, daemons spawned by the test are not shut down automatically and must be killed manually afterwards. + +### Cleaning Up Stale Daemons + +Before running `test/cli` or `test/sharness`, stop any stale `ipfs daemon` processes owned by the current user. 
Leftover daemons hold locks and bind ports, causing test failures: + +```bash +pkill -f "ipfs daemon" +``` + +### Writing Tests + +- all new integration tests go in `test/cli/`, not `test/sharness/` +- if a `test/sharness` test needs significant changes, remove it and add a replacement in `test/cli/` +- use [testify](https://github.com/stretchr/testify) for assertions (already a dependency) +- use `t.Context()` instead of `context.Background()` in tests +- for Go 1.25+, use `testing/synctest` when testing concurrent code (goroutines, channels, timers) +- reuse existing `.car` fixtures in `test/cli/fixtures/` when possible; only add new fixtures when the test requires data not covered by existing ones +- when writing tests that cover CIDv0 vs CIDv1, always set the CID version explicitly (never rely on defaults); if chunk size matters for the test, also set the chunker explicitly +- always re-run modified tests locally before submitting to confirm they pass +- avoid emojis in test names and test log output + +## Before Submitting + +Run these steps in order before considering work complete: + +1. `make mod_tidy` (if `go.mod` changed) +2. `go fmt ./...` +3. `make build` (if non-test `.go` files changed) +4. `make -O test_go_lint` +5. `go test ./...` (or the relevant subset) + +## Documentation and Commit Messages + +- after editing CLI help text in `core/commands/`, verify width: `go test ./test/cli/... 
-run TestCommandDocsWidth` +- config options are documented in `docs/config.md` +- changelogs in `docs/changelogs/`: only edit the Table of Contents and the Highlights section; the Changelog and Contributors sections are auto-generated and must not be modified +- avoid unnecessary line wrapping in `docs/changelogs/*`; let lines be long +- follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) +- keep commit titles short and messages terse + +## Writing Style + +When writing docs, comments, and commit messages: + +- avoid emojis in code, comments, and log output +- keep an empty line before lists in markdown +- use backticks around CLI commands, paths, environment variables, and config options + +## PR Guidelines + +- explain what changed and why in the PR description +- include test coverage for new functionality and bug fixes +- run `make -O test_go_lint` and fix any lint issues before submitting +- verify that `go test ./...` passes locally +- when modifying `test/sharness` tests significantly, migrate them to `test/cli` instead +- end the PR description with a `## References` section listing related context, one link per line +- if the PR closes an issue in `ipfs/kubo`, each closing reference should be a bullet starting with `Closes`: + +```markdown +## References + +- Closes https://github.com/ipfs/kubo/issues/1234 +- Closes https://github.com/ipfs/kubo/issues/5678 +- https://discuss.ipfs.tech/t/related-topic/999 +``` + +## Scope and Safety + +Do not modify or touch: + +- files under `test/sharness/lib/` (third-party sharness test framework) +- CI workflows in `.github/` unless explicitly asked +- auto-generated sections in `docs/changelogs/` (Changelog and Contributors are generated; only TOC and Highlights are human-edited) + +Do not run without being asked: + +- `make test` or `make test_sharness` (full suite is slow; prefer targeted tests) +- `ipfs daemon` without a timeout + +## Running the Daemon + +Always run the daemon with a 
timeout or shut it down promptly: + +```bash +timeout 60s ipfs daemon # auto-kill after 60s +ipfs shutdown # graceful shutdown via API +``` + +Kill dangling daemons before re-running tests: `pkill -f "ipfs daemon"` diff --git a/CHANGELOG.md b/CHANGELOG.md index 6bc565d8600..5e4a2c47c66 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,7 @@ # Kubo Changelogs +- [v0.42](docs/changelogs/v0.42.md) +- [v0.41](docs/changelogs/v0.41.md) - [v0.40](docs/changelogs/v0.40.md) - [v0.39](docs/changelogs/v0.39.md) - [v0.38](docs/changelogs/v0.38.md) diff --git a/Dockerfile b/Dockerfile index 6d43beefad1..ffc4553a3eb 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,14 @@ # syntax=docker/dockerfile:1 # Enables BuildKit with cache mounts for faster builds -FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.25 AS builder + +# GO_VERSION is the source of truth for the Go toolchain version. CI parses +# the `go` directive from go.mod and overrides this build arg, so the +# published image always matches the version `setup-go` installs from go.mod. +# The default below is what `docker build .` (without --build-arg) uses, and +# `.github/workflows/docker-check.yml` lints that this default stays in sync +# with go.mod. When bumping Go, update both go.mod and this default together. 
+ARG GO_VERSION=1.26.2 +FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:${GO_VERSION} AS builder ARG TARGETOS TARGETARCH diff --git a/README.md b/README.md index c1eaf97487b..6cdcc4bb0bb 100644 --- a/README.md +++ b/README.md @@ -34,6 +34,7 @@ Kubo was the first [IPFS](https://docs.ipfs.tech/concepts/what-is-ipfs/) impleme - [HTTP Gateway](https://specs.ipfs.tech/http-gateways/) for trusted and [trustless](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval) content retrieval - [HTTP RPC API](https://docs.ipfs.tech/reference/kubo/rpc/) to control the daemon - [HTTP Routing V1](https://specs.ipfs.tech/routing/http-routing-v1/) client and server for [delegated routing](./docs/delegated-routing.md) +- [FUSE mounts](./docs/fuse.md) for mounting `/ipfs`, `/ipns`, and `/mfs` as local filesystems (experimental) - [Content blocking](./docs/content-blocking.md) for public node operators **Other IPFS implementations:** [Helia](https://github.com/ipfs/helia) (JavaScript), [more...](https://docs.ipfs.tech/concepts/ipfs-implementations/) @@ -178,6 +179,7 @@ Kubo is available in community-maintained packages across many operating systems | [HTTP RPC clients](docs/http-rpc-clients.md) | Client libraries for Go, JS | | [Delegated routing](docs/delegated-routing.md) | Multi-router and HTTP routing | | [Metrics & monitoring](docs/metrics.md) | Prometheus metrics | +| [FUSE mounts](docs/fuse.md) | Mount `/ipfs`, `/ipns`, `/mfs` as local filesystems | | [Content blocking](docs/content-blocking.md) | Denylist for public nodes | | [Customizing](docs/customizing.md) | Unsure if use Plugins, Boxo, or fork? | | [Debug guide](docs/debug-guide.md) | CPU profiles, memory analysis, tracing | @@ -186,7 +188,7 @@ Kubo is available in community-maintained packages across many operating systems ## Development -See the [Developer Guide](docs/developer-guide.md) for build instructions, testing, and contribution workflow. 
+See the [Developer Guide](docs/developer-guide.md) for build instructions, testing, and contribution workflow. AI coding agents should follow [AGENTS.md](AGENTS.md). ## Getting Help diff --git a/Rules.mk b/Rules.mk index b04e3d73e73..418aec15b15 100644 --- a/Rules.mk +++ b/Rules.mk @@ -138,6 +138,7 @@ help: @echo ' test_short - Run fast tests (test_go_fmt, test_unit)' @echo ' test_unit - Run unit tests with coverage (excludes test/cli)' @echo ' test_cli - Run CLI integration tests (requires built binary)' + @echo ' test_fuse - Run FUSE tests (requires /dev/fuse and fusermount)' @echo ' test_go_fmt - Check Go source formatting' @echo ' test_go_build - Build kubo for all platforms from .github/build-platforms.yml' @echo ' test_go_lint - Run golangci-lint' diff --git a/bin/test-go-fmt b/bin/test-go-fmt index 3f69d83895a..dee8532844d 100755 --- a/bin/test-go-fmt +++ b/bin/test-go-fmt @@ -11,7 +11,7 @@ if [ -n "$(cat $T)" ]; then echo "-----------------------------------" cat "$T" echo "-----------------------------------" - echo "Run 'go fmt ./...' in your source directory" + echo "Run 'gofmt -s -w .' in your source directory" rm -f "$T" exit 1 fi diff --git a/client/rpc/object.go b/client/rpc/object.go index 5c9d323e87b..0de1dba9fbb 100644 --- a/client/rpc/object.go +++ b/client/rpc/object.go @@ -24,6 +24,7 @@ func (api *ObjectAPI) AddLink(ctx context.Context, base path.Path, name string, var out objectOut err = api.core().Request("object/patch/add-link", base.String(), name, child.String()). Option("create", options.Create). + Option("allow-non-unixfs", options.SkipUnixFSValidation). 
Exec(ctx, &out) if err != nil { return path.ImmutablePath{}, err @@ -37,9 +38,15 @@ func (api *ObjectAPI) AddLink(ctx context.Context, base path.Path, name string, return path.FromCid(c), nil } -func (api *ObjectAPI) RmLink(ctx context.Context, base path.Path, link string) (path.ImmutablePath, error) { +func (api *ObjectAPI) RmLink(ctx context.Context, base path.Path, link string, opts ...caopts.ObjectRmLinkOption) (path.ImmutablePath, error) { + options, err := caopts.ObjectRmLinkOptions(opts...) + if err != nil { + return path.ImmutablePath{}, err + } + var out objectOut - err := api.core().Request("object/patch/rm-link", base.String(), link). + err = api.core().Request("object/patch/rm-link", base.String(), link). + Option("allow-non-unixfs", options.SkipUnixFSValidation). Exec(ctx, &out) if err != nil { return path.ImmutablePath{}, err diff --git a/cmd/ipfs/kubo/daemon.go b/cmd/ipfs/kubo/daemon.go index f2ba598d0f0..1af9edd338c 100644 --- a/cmd/ipfs/kubo/daemon.go +++ b/cmd/ipfs/kubo/daemon.go @@ -1239,9 +1239,11 @@ func mountFuse(req *cmds.Request, cctx *oldcmds.Context) error { if err != nil { return err } + // Extra space after "MFS" so "mounted at:" lines up with IPFS and + // IPNS in the column above. Matches MountCmd's output formatter. fmt.Printf("IPFS mounted at: %s\n", fsdir) fmt.Printf("IPNS mounted at: %s\n", nsdir) - fmt.Printf("MFS mounted at: %s\n", mfsdir) + fmt.Printf("MFS mounted at: %s\n", mfsdir) return nil } diff --git a/cmd/ipfs/kubo/daemon_linux.go b/cmd/ipfs/kubo/daemon_linux.go index 2335dd2b93d..8fb050e4387 100644 --- a/cmd/ipfs/kubo/daemon_linux.go +++ b/cmd/ipfs/kubo/daemon_linux.go @@ -1,3 +1,4 @@ +// Systemd readiness notification (sd_notify). Linux only. 
//go:build linux package kubo diff --git a/cmd/ipfs/kubo/daemon_other.go b/cmd/ipfs/kubo/daemon_other.go index 6fbc302591b..b28ec2d427c 100644 --- a/cmd/ipfs/kubo/daemon_other.go +++ b/cmd/ipfs/kubo/daemon_other.go @@ -1,3 +1,4 @@ +// No-op readiness notification on non-Linux platforms. //go:build !linux package kubo diff --git a/cmd/ipfs/runmain_test.go b/cmd/ipfs/runmain_test.go index 56a647f8a81..d8cc8b0e353 100644 --- a/cmd/ipfs/runmain_test.go +++ b/cmd/ipfs/runmain_test.go @@ -1,3 +1,4 @@ +// Only built when collecting coverage via "go test -tags testrunmain". //go:build testrunmain package main_test diff --git a/cmd/ipfs/util/signal.go b/cmd/ipfs/util/signal.go index c5dba8349b7..84e0b4c588b 100644 --- a/cmd/ipfs/util/signal.go +++ b/cmd/ipfs/util/signal.go @@ -1,3 +1,4 @@ +// Signal handling. Excluded from wasm where os.Signal is unavailable. //go:build !wasm package util diff --git a/cmd/ipfs/util/ui.go b/cmd/ipfs/util/ui.go index f39f1e17104..2c59f4ebd67 100644 --- a/cmd/ipfs/util/ui.go +++ b/cmd/ipfs/util/ui.go @@ -1,3 +1,4 @@ +// GUI detection stub. Windows has its own implementation. //go:build !windows package util diff --git a/cmd/ipfs/util/ulimit_freebsd.go b/cmd/ipfs/util/ulimit_freebsd.go index 358bccfe3bf..96bcc573522 100644 --- a/cmd/ipfs/util/ulimit_freebsd.go +++ b/cmd/ipfs/util/ulimit_freebsd.go @@ -1,3 +1,4 @@ +// FreeBSD ulimit handling via sysctl. //go:build freebsd package util diff --git a/cmd/ipfs/util/ulimit_test.go b/cmd/ipfs/util/ulimit_test.go index 33b077776ed..d145ddf5c1c 100644 --- a/cmd/ipfs/util/ulimit_test.go +++ b/cmd/ipfs/util/ulimit_test.go @@ -1,3 +1,4 @@ +// Ulimit tests. Skipped on windows and plan9 (no getrlimit). //go:build !windows && !plan9 package util diff --git a/cmd/ipfs/util/ulimit_unix.go b/cmd/ipfs/util/ulimit_unix.go index b223de0ff46..94c93899801 100644 --- a/cmd/ipfs/util/ulimit_unix.go +++ b/cmd/ipfs/util/ulimit_unix.go @@ -1,3 +1,4 @@ +// Unix ulimit handling via getrlimit/setrlimit. 
//go:build darwin || linux || netbsd || openbsd package util diff --git a/cmd/ipfs/util/ulimit_windows.go b/cmd/ipfs/util/ulimit_windows.go index cd1447365f1..0baf4d31f52 100644 --- a/cmd/ipfs/util/ulimit_windows.go +++ b/cmd/ipfs/util/ulimit_windows.go @@ -1,3 +1,4 @@ +// Windows ulimit handling via SetHandleInformation. //go:build windows package util diff --git a/cmd/ipfswatch/ipfswatch_test.go b/cmd/ipfswatch/ipfswatch_test.go index ac68e96cc76..317cbfeb4e9 100644 --- a/cmd/ipfswatch/ipfswatch_test.go +++ b/cmd/ipfswatch/ipfswatch_test.go @@ -1,3 +1,4 @@ +// Excluded from plan9 (no fsnotify support). //go:build !plan9 package main diff --git a/cmd/ipfswatch/main.go b/cmd/ipfswatch/main.go index a25dcbcab86..a4588959859 100644 --- a/cmd/ipfswatch/main.go +++ b/cmd/ipfswatch/main.go @@ -1,3 +1,4 @@ +// Excluded from plan9 (no fsnotify support). //go:build !plan9 package main diff --git a/config/import.go b/config/import.go index 8c40d7d1e11..ba795569589 100644 --- a/config/import.go +++ b/config/import.go @@ -7,9 +7,12 @@ import ( "strings" chunk "github.com/ipfs/boxo/chunker" + merkledag "github.com/ipfs/boxo/ipld/merkledag" "github.com/ipfs/boxo/ipld/unixfs/importer/helpers" uio "github.com/ipfs/boxo/ipld/unixfs/io" + "github.com/ipfs/boxo/mfs" "github.com/ipfs/boxo/verifcid" + cid "github.com/ipfs/go-cid" mh "github.com/multiformats/go-multihash" ) @@ -20,6 +23,7 @@ const ( DefaultHashFunction = "sha2-256" DefaultFastProvideRoot = true DefaultFastProvideWait = false + DefaultFastProvideDAG = false DefaultUnixFSHAMTDirectorySizeThreshold = 262144 // 256KiB - https://github.com/ipfs/boxo/blob/6c5a07602aed248acc86598f30ab61923a54a83e/ipld/unixfs/io/directory.go#L26 @@ -68,6 +72,7 @@ type Import struct { BatchMaxNodes OptionalInteger BatchMaxSize OptionalInteger FastProvideRoot Flag + FastProvideDAG Flag FastProvideWait Flag } @@ -102,10 +107,9 @@ func ValidateImportConfig(cfg *Import) error { if !cfg.UnixFSHAMTDirectoryMaxFanout.IsDefault() { fanout := 
cfg.UnixFSHAMTDirectoryMaxFanout.WithDefault(DefaultUnixFSHAMTDirectoryMaxFanout) - // Check all requirements: fanout < 8 covers both non-positive and non-multiple of 8 - // Combined with power of 2 check and max limit, this ensures valid values: 8, 16, 32, 64, 128, 256, 512, 1024 + // Valid values are powers of 2 between 8 and 1024: 8, 16, 32, 64, 128, 256, 512, 1024 if fanout < 8 || !isPowerOfTwo(fanout) || fanout > 1024 { - return fmt.Errorf("Import.UnixFSHAMTDirectoryMaxFanout must be a positive power of 2, multiple of 8, and not exceed 1024 (got %d)", fanout) + return fmt.Errorf("Import.UnixFSHAMTDirectoryMaxFanout must be a power of 2, between 8 and 1024 (got %d)", fanout) } } @@ -260,3 +264,47 @@ func (i *Import) UnixFSSplitterFunc() chunk.SplitterGen { return s } } + +// MFSRootOptions returns all MFS root options derived from Import config. +func (i *Import) MFSRootOptions() ([]mfs.Option, error) { + cidBuilder, err := i.UnixFSCidBuilder() + if err != nil { + return nil, err + } + sizeEstimationMode := i.HAMTSizeEstimationMode() + return []mfs.Option{ + mfs.WithCidBuilder(cidBuilder), + mfs.WithChunker(i.UnixFSSplitterFunc()), + mfs.WithMaxLinks(int(i.UnixFSDirectoryMaxLinks.WithDefault(DefaultUnixFSDirectoryMaxLinks))), + mfs.WithMaxHAMTFanout(int(i.UnixFSHAMTDirectoryMaxFanout.WithDefault(DefaultUnixFSHAMTDirectoryMaxFanout))), + mfs.WithHAMTShardingSize(int(i.UnixFSHAMTDirectorySizeThreshold.WithDefault(DefaultUnixFSHAMTDirectorySizeThreshold))), + mfs.WithSizeEstimationMode(sizeEstimationMode), + }, nil +} + +// UnixFSCidBuilder returns a cid.Builder based on Import.CidVersion and +// Import.HashFunction. Always builds an explicit prefix so that MFS +// respects kubo defaults even when they differ from boxo's internal +// CIDv0/sha2-256 default (see https://github.com/ipfs/kubo/issues/4143). 
+func (i *Import) UnixFSCidBuilder() (cid.Builder, error) { + cidVer := int(i.CidVersion.WithDefault(DefaultCidVersion)) + hashFunc := i.HashFunction.WithDefault(DefaultHashFunction) + + if hashFunc != DefaultHashFunction && cidVer == 0 { + cidVer = 1 + } + + prefix, err := merkledag.PrefixForCidVersion(cidVer) + if err != nil { + return nil, err + } + + hashCode, ok := mh.Names[strings.ToLower(hashFunc)] + if !ok { + return nil, fmt.Errorf("Import.HashFunction unrecognized: %q", hashFunc) + } + prefix.MhType = hashCode + prefix.MhLength = -1 + + return &prefix, nil +} diff --git a/config/import_test.go b/config/import_test.go index 5d9605c1d77..1029bfe2d21 100644 --- a/config/import_test.go +++ b/config/import_test.go @@ -26,25 +26,25 @@ func TestValidateImportConfig_HAMTFanout(t *testing.T) { {name: "valid 1024", fanout: 1024, wantErr: false}, // Invalid values - not powers of 2 - {name: "invalid 7", fanout: 7, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, - {name: "invalid 15", fanout: 15, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, - {name: "invalid 100", fanout: 100, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, - {name: "invalid 257", fanout: 257, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, - {name: "invalid 1000", fanout: 1000, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, + {name: "invalid 7", fanout: 7, wantErr: true, errMsg: "must be a power of 2, between 8 and 1024"}, + {name: "invalid 15", fanout: 15, wantErr: true, errMsg: "must be a power of 2, between 8 and 1024"}, + {name: "invalid 100", fanout: 100, wantErr: true, errMsg: "must be a power of 2, between 8 and 1024"}, + {name: "invalid 257", fanout: 257, wantErr: true, errMsg: "must be a power of 2, between 8 and 1024"}, + {name: "invalid 1000", fanout: 1000, 
wantErr: true, errMsg: "must be a power of 2, between 8 and 1024"}, - // Invalid values - powers of 2 but not multiples of 8 - {name: "invalid 1", fanout: 1, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, - {name: "invalid 2", fanout: 2, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, - {name: "invalid 4", fanout: 4, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, + // Invalid values - powers of 2 but less than 8 + {name: "invalid 1", fanout: 1, wantErr: true, errMsg: "must be a power of 2, between 8 and 1024"}, + {name: "invalid 2", fanout: 2, wantErr: true, errMsg: "must be a power of 2, between 8 and 1024"}, + {name: "invalid 4", fanout: 4, wantErr: true, errMsg: "must be a power of 2, between 8 and 1024"}, // Invalid values - exceeds 1024 - {name: "invalid 2048", fanout: 2048, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, - {name: "invalid 4096", fanout: 4096, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, + {name: "invalid 2048", fanout: 2048, wantErr: true, errMsg: "must be a power of 2, between 8 and 1024"}, + {name: "invalid 4096", fanout: 4096, wantErr: true, errMsg: "must be a power of 2, between 8 and 1024"}, // Invalid values - negative or zero - {name: "invalid 0", fanout: 0, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, - {name: "invalid -8", fanout: -8, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, - {name: "invalid -256", fanout: -256, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, + {name: "invalid 0", fanout: 0, wantErr: true, errMsg: "must be a power of 2, between 8 and 1024"}, + {name: "invalid -8", fanout: -8, wantErr: true, errMsg: "must be a power of 2, between 8 and 1024"}, + 
{name: "invalid -256", fanout: -256, wantErr: true, errMsg: "must be a power of 2, between 8 and 1024"}, } for _, tt := range tests { @@ -483,6 +483,95 @@ func TestValidateImportConfig_DAGLayout(t *testing.T) { } } +func TestImport_UnixFSCidBuilder(t *testing.T) { + defaultMhType := mh.Names[strings.ToLower(DefaultHashFunction)] + + tests := []struct { + name string + cfg Import + wantCidVer uint64 + wantMhType uint64 + }{ + { + name: "CIDv1 explicit", + cfg: Import{CidVersion: *NewOptionalInteger(1)}, + wantCidVer: 1, + wantMhType: defaultMhType, + }, + { + name: "CIDv0 explicit", + cfg: Import{CidVersion: *NewOptionalInteger(0)}, + wantCidVer: 0, + wantMhType: defaultMhType, + }, + { + name: "non-default hash upgrades CIDv0 to CIDv1", + cfg: Import{HashFunction: *NewOptionalString("sha2-512")}, + wantCidVer: 1, + wantMhType: mh.SHA2_512, + }, + { + name: "CIDv1 with sha2-512", + cfg: Import{ + CidVersion: *NewOptionalInteger(1), + HashFunction: *NewOptionalString("sha2-512"), + }, + wantCidVer: 1, + wantMhType: mh.SHA2_512, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + builder, err := tt.cfg.UnixFSCidBuilder() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if builder == nil { + t.Fatal("expected non-nil builder") + } + c, err := builder.Sum([]byte("test")) + if err != nil { + t.Fatalf("builder.Sum failed: %v", err) + } + pref := c.Prefix() + if pref.Version != tt.wantCidVer { + t.Errorf("CID version = %d, want %d", pref.Version, tt.wantCidVer) + } + if pref.MhType != tt.wantMhType { + t.Errorf("multihash type = 0x%x, want 0x%x", pref.MhType, tt.wantMhType) + } + }) + } +} + +// TestImport_UnixFSCidBuilderDefaults verifies that UnixFSCidBuilder always +// returns an explicit builder even when no config is set, so that MFS +// respects kubo's DefaultCidVersion rather than relying on boxo's internal +// CIDv0 default (relevant for https://github.com/ipfs/kubo/issues/4143). 
+func TestImport_UnixFSCidBuilderDefaults(t *testing.T) { + cfg := &Import{} + builder, err := cfg.UnixFSCidBuilder() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if builder == nil { + t.Fatal("expected non-nil builder at defaults") + } + c, err := builder.Sum([]byte("test")) + if err != nil { + t.Fatalf("builder.Sum failed: %v", err) + } + pref := c.Prefix() + if pref.Version != uint64(DefaultCidVersion) { + t.Errorf("CID version = %d, want DefaultCidVersion (%d)", pref.Version, DefaultCidVersion) + } + wantMhType := mh.Names[strings.ToLower(DefaultHashFunction)] + if pref.MhType != wantMhType { + t.Errorf("multihash type = 0x%x, want 0x%x (DefaultHashFunction=%s)", pref.MhType, wantMhType, DefaultHashFunction) + } +} + func TestImport_HAMTSizeEstimationMode(t *testing.T) { tests := []struct { cfg string diff --git a/config/mounts.go b/config/mounts.go index 571316cf386..d7bccf0e78a 100644 --- a/config/mounts.go +++ b/config/mounts.go @@ -1,9 +1,39 @@ package config -// Mounts stores the (string) mount points. +const ( + DefaultFuseAllowOther = false + DefaultStoreMtime = false + DefaultStoreMode = false +) + +// Mounts stores FUSE mount point configuration. type Mounts struct { - IPFS string - IPNS string - MFS string - FuseAllowOther bool + // IPFS is the mountpoint for the read-only /ipfs/ namespace. + IPFS string + + // IPNS is the mountpoint for the /ipns/ namespace. Directories backed + // by keys this node holds are writable; all other names resolve through + // IPNS to read-only symlinks into the /ipfs mount. + IPNS string + + // MFS is the mountpoint for the Mutable File System (ipfs files API). + MFS string + + // FuseAllowOther sets the FUSE allow_other mount option, letting + // users other than the mounter access the mounted filesystem. 
+ FuseAllowOther Flag + + // StoreMtime controls whether writable mounts (/ipns and /mfs) persist + // the current time as mtime in UnixFS metadata when creating a file or + // opening it for writing. This changes the resulting CID even when file + // content is identical. + // + // Reading mtime from UnixFS is always enabled on all mounts. + StoreMtime Flag + + // StoreMode controls whether writable mounts (/ipns and /mfs) persist + // POSIX permission bits in UnixFS metadata when a chmod request is made. + // + // Reading mode from UnixFS is always enabled on all mounts. + StoreMode Flag } diff --git a/config/profile.go b/config/profile.go index cfcc828c2d7..860a2374ccf 100644 --- a/config/profile.go +++ b/config/profile.go @@ -327,7 +327,7 @@ fetching may be degraded. Description: `Legacy UnixFS import profile for backward-compatible CID generation. Produces CIDv0 with no raw leaves, sha2-256, 256 KiB chunks, and link-based HAMT size estimation. Use only when legacy CIDs are required. -See https://github.com/ipfs/specs/pull/499. Alias: legacy-cid-v0`, +See https://specs.ipfs.tech/ipips/ipip-0499/. Alias: legacy-cid-v0`, Transform: applyUnixFSv02015, }, "legacy-cid-v0": { @@ -338,7 +338,7 @@ See https://github.com/ipfs/specs/pull/499. Alias: legacy-cid-v0`, Description: `Recommended UnixFS import profile for cross-implementation CID determinism. Uses CIDv1, raw leaves, sha2-256, 1 MiB chunks, 1024 links per file node, 256 HAMT fanout, and block-based size estimation for HAMT threshold. 
-See https://github.com/ipfs/specs/pull/499`, +See https://specs.ipfs.tech/ipips/ipip-0499/`, Transform: func(c *Config) error { c.Import.CidVersion = *NewOptionalInteger(1) c.Import.UnixFSRawLeaves = True diff --git a/config/provide.go b/config/provide.go index 8fae9ded7d8..c6c79f888cd 100644 --- a/config/provide.go +++ b/config/provide.go @@ -12,6 +12,22 @@ const ( DefaultProvideEnabled = true DefaultProvideStrategy = "all" + // DefaultProvideBloomFPRate is the target false positive rate for the + // bloom filter used by +unique and +entities reprovide cycles and + // fast-provide-dag walks. Expressed as 1/N (one false positive per N + // lookups). At ~1 in 4.75M (~0.00002%) each CID costs ~4 bytes before + // ipfs/bbloom's power-of-two rounding. + // + // Kubo owns this default independently of boxo/dag/walker; the two + // values may diverge over time without coordination. + DefaultProvideBloomFPRate = 4_750_000 + + // MinProvideBloomFPRate is the smallest accepted Provide.BloomFPRate. + // Below 1 in 1M the bloom filter becomes lossy enough to drop a + // meaningful fraction of CIDs from each reprovide cycle (e.g. at + // rate=10_000 a 100M-CID repo skips ~10K CIDs per cycle). 
+ MinProvideBloomFPRate = 1_000_000 + // DHT provider defaults DefaultProvideDHTInterval = 22 * time.Hour // https://github.com/ipfs/kubo/pull/9326 DefaultProvideDHTMaxWorkers = 16 // Unified default for both sweep and legacy providers @@ -36,6 +52,8 @@ const ( ProvideStrategyPinned ProvideStrategyRoots ProvideStrategyMFS + ProvideStrategyUnique // bloom filter cross-DAG deduplication + ProvideStrategyEntities // entity-aware traversal (implies Unique) ) // Provide configures both immediate CID announcements (provide operations) for new content @@ -50,6 +68,16 @@ type Provide struct { // Default: DefaultProvideStrategy Strategy *OptionalString `json:",omitempty"` + // BloomFPRate sets the target false positive rate of the bloom filter + // used by Provide.Strategy modifiers +unique and +entities (and the + // matching fast-provide-dag walk). Expressed as 1/N (one false + // positive per N lookups), so higher N means lower FP rate but more + // memory per CID. Only takes effect when Provide.Strategy includes + // +unique or +entities. + // + // Default: DefaultProvideBloomFPRate + BloomFPRate *OptionalInteger `json:",omitempty"` + // DHT configures DHT-specific provide and reprovide settings. DHT ProvideDHT } @@ -100,25 +128,78 @@ type ProvideDHT struct { ResumeEnabled Flag `json:",omitempty"` } -func ParseProvideStrategy(s string) ProvideStrategy { +func ParseProvideStrategy(s string) (ProvideStrategy, error) { var strategy ProvideStrategy for part := range strings.SplitSeq(s, "+") { switch part { - case "all", "flat", "": // special case, does not mix with others ("flat" is deprecated, maps to "all") - return ProvideStrategyAll + case "all", "flat": + strategy |= ProvideStrategyAll + case "": + // empty string (default config) maps to "all", + // but empty tokens from splitting (e.g. 
"pinned+") are invalid + if s == "" { + strategy |= ProvideStrategyAll + } else { + return 0, fmt.Errorf("invalid provide strategy: empty token in %q", s) + } case "pinned": strategy |= ProvideStrategyPinned case "roots": strategy |= ProvideStrategyRoots case "mfs": strategy |= ProvideStrategyMFS + case "unique": + strategy |= ProvideStrategyUnique + case "entities": + strategy |= ProvideStrategyEntities | ProvideStrategyUnique + default: + return 0, fmt.Errorf("unknown provide strategy token: %q in %q", part, s) + } + } + // "all" provides every block and cannot be combined with selective strategies + if strategy&ProvideStrategyAll != 0 && strategy != ProvideStrategyAll { + return 0, fmt.Errorf("\"all\" strategy cannot be combined with other strategies in %q", s) + } + // +unique/+entities require a base strategy that walks DAGs (pinned and/or mfs) + wantsDedup := strategy&(ProvideStrategyUnique|ProvideStrategyEntities) != 0 + if wantsDedup { + walksDAGs := strategy&(ProvideStrategyPinned|ProvideStrategyMFS) != 0 + if !walksDAGs { + return 0, fmt.Errorf("+unique/+entities must combine with pinned and/or mfs in %q", s) + } + if strategy&ProvideStrategyRoots != 0 { + return 0, fmt.Errorf("+unique/+entities is incompatible with roots in %q", s) } } + return strategy, nil +} + +// MustParseProvideStrategy is like ParseProvideStrategy but panics on error. +// Use with strategy strings that have already been validated at startup. +func MustParseProvideStrategy(s string) ProvideStrategy { + strategy, err := ParseProvideStrategy(s) + if err != nil { + panic(err) + } return strategy } // ValidateProvideConfig validates the Provide configuration according to DHT requirements. 
func ValidateProvideConfig(cfg *Provide) error { + // Validate Provide.Strategy + strategy := cfg.Strategy.WithDefault(DefaultProvideStrategy) + if _, err := ParseProvideStrategy(strategy); err != nil { + return fmt.Errorf("Provide.Strategy: %w", err) + } + + // Validate Provide.BloomFPRate + if !cfg.BloomFPRate.IsDefault() { + rate := cfg.BloomFPRate.WithDefault(DefaultProvideBloomFPRate) + if rate < MinProvideBloomFPRate { + return fmt.Errorf("Provide.BloomFPRate must be >= %d (1 in 1M), got %d", MinProvideBloomFPRate, rate) + } + } + // Validate Provide.DHT.Interval if !cfg.DHT.Interval.IsDefault() { interval := cfg.DHT.Interval.WithDefault(DefaultProvideDHTInterval) @@ -184,7 +265,7 @@ func ValidateProvideConfig(cfg *Provide) error { // ShouldProvideForStrategy determines if content should be provided based on the provide strategy // and content characteristics (pinned status, root status, MFS status). func ShouldProvideForStrategy(strategy ProvideStrategy, isPinned bool, isPinnedRoot bool, isMFS bool) bool { - if strategy == ProvideStrategyAll { + if strategy&ProvideStrategyAll != 0 { // 'all' strategy: always provide return true } diff --git a/config/provide_test.go b/config/provide_test.go index 5c8f5fac119..cd0f3902593 100644 --- a/config/provide_test.go +++ b/config/provide_test.go @@ -9,27 +9,146 @@ import ( ) func TestParseProvideStrategy(t *testing.T) { - tests := []struct { - input string - expect ProvideStrategy - }{ - {"all", ProvideStrategyAll}, - {"pinned", ProvideStrategyPinned}, - {"mfs", ProvideStrategyMFS}, - {"pinned+mfs", ProvideStrategyPinned | ProvideStrategyMFS}, - {"invalid", 0}, - {"all+invalid", ProvideStrategyAll}, - {"", ProvideStrategyAll}, - {"flat", ProvideStrategyAll}, // deprecated, maps to "all" - {"flat+all", ProvideStrategyAll}, - } + t.Run("valid strategies", func(t *testing.T) { + tests := []struct { + input string + expect ProvideStrategy + }{ + {"all", ProvideStrategyAll}, + {"pinned", ProvideStrategyPinned}, + {"roots", 
ProvideStrategyRoots}, + {"mfs", ProvideStrategyMFS}, + {"pinned+mfs", ProvideStrategyPinned | ProvideStrategyMFS}, + {"pinned+roots", ProvideStrategyPinned | ProvideStrategyRoots}, + {"pinned+mfs+roots", ProvideStrategyPinned | ProvideStrategyMFS | ProvideStrategyRoots}, + {"", ProvideStrategyAll}, // empty string = default = all + {"flat", ProvideStrategyAll}, // deprecated, maps to "all" + {"flat+all", ProvideStrategyAll}, // redundant but valid + {"all+all", ProvideStrategyAll}, // redundant but valid + {"mfs+pinned", ProvideStrategyMFS | ProvideStrategyPinned}, // order doesn't matter + // +unique and +entities modifiers + {"pinned+unique", ProvideStrategyPinned | ProvideStrategyUnique}, + {"pinned+entities", ProvideStrategyPinned | ProvideStrategyEntities | ProvideStrategyUnique}, + {"pinned+unique+entities", ProvideStrategyPinned | ProvideStrategyUnique | ProvideStrategyEntities}, + {"mfs+unique", ProvideStrategyMFS | ProvideStrategyUnique}, + {"mfs+entities", ProvideStrategyMFS | ProvideStrategyEntities | ProvideStrategyUnique}, + {"pinned+mfs+unique", ProvideStrategyPinned | ProvideStrategyMFS | ProvideStrategyUnique}, + {"pinned+mfs+entities", ProvideStrategyPinned | ProvideStrategyMFS | ProvideStrategyEntities | ProvideStrategyUnique}, + } - for _, tt := range tests { - result := ParseProvideStrategy(tt.input) - if result != tt.expect { - t.Errorf("ParseProvideStrategy(%q) = %d, want %d", tt.input, result, tt.expect) + for _, tt := range tests { + result, err := ParseProvideStrategy(tt.input) + require.NoError(t, err, "ParseProvideStrategy(%q)", tt.input) + assert.Equal(t, tt.expect, result, "ParseProvideStrategy(%q)", tt.input) } - } + }) + + t.Run("unknown token (including typos)", func(t *testing.T) { + tests := []struct { + input string + err string + }{ + {"invalid", `unknown provide strategy token: "invalid"`}, + {"uniuqe", `unknown provide strategy token: "uniuqe"`}, // typo of "unique" + {"entites", `unknown provide strategy token: "entites"`}, 
// cspell:disable-line -- intentional typo of "entities" + {"pinned+uniuqe", `unknown provide strategy token: "uniuqe"`}, // typo in combo + } + + for _, tt := range tests { + _, err := ParseProvideStrategy(tt.input) + require.Error(t, err, "ParseProvideStrategy(%q) should fail", tt.input) + assert.Contains(t, err.Error(), tt.err) + } + }) + + t.Run("empty token from delimiter", func(t *testing.T) { + tests := []string{ + "pinned+", // trailing + + "+pinned", // leading + + "pinned++mfs", // double + + } + + for _, input := range tests { + _, err := ParseProvideStrategy(input) + require.Error(t, err, "ParseProvideStrategy(%q) should fail", input) + assert.Contains(t, err.Error(), "empty token") + } + }) + + t.Run("all cannot be combined with other strategies", func(t *testing.T) { + tests := []string{ + "all+pinned", + "all+mfs", + "all+roots", + "flat+pinned", + "all+pinned+mfs", + } + + for _, input := range tests { + _, err := ParseProvideStrategy(input) + require.Error(t, err, "ParseProvideStrategy(%q) should fail", input) + assert.Contains(t, err.Error(), "cannot be combined") + } + }) + + t.Run("+unique/+entities require base strategy", func(t *testing.T) { + tests := []string{ + "unique", // modifier alone + "entities", // modifier alone + "unique+entities", // modifiers without base + "roots+unique", // roots is incompatible + "roots+entities", // roots is incompatible + "roots+pinned+unique", // roots mixed with pinned+unique + } + + for _, input := range tests { + _, err := ParseProvideStrategy(input) + require.Error(t, err, "ParseProvideStrategy(%q) should fail", input) + } + }) +} + +func TestMustParseProvideStrategy(t *testing.T) { + t.Run("valid input returns strategy", func(t *testing.T) { + assert.Equal(t, ProvideStrategyAll, MustParseProvideStrategy("all")) + assert.Equal(t, ProvideStrategyPinned|ProvideStrategyMFS, MustParseProvideStrategy("pinned+mfs")) + }) + + t.Run("invalid input panics", func(t *testing.T) { + assert.Panics(t, func() { 
MustParseProvideStrategy("bogus") }) + assert.Panics(t, func() { MustParseProvideStrategy("all+pinned") }) + }) +} + +func TestValidateProvideConfig_Strategy(t *testing.T) { + t.Run("valid strategies", func(t *testing.T) { + for _, s := range []string{ + "all", "pinned", "roots", "mfs", "pinned+mfs", + "pinned+unique", "pinned+entities", "pinned+mfs+entities", + } { + cfg := &Provide{Strategy: NewOptionalString(s)} + require.NoError(t, ValidateProvideConfig(cfg), "strategy=%q", s) + } + }) + + t.Run("default (nil) strategy is valid", func(t *testing.T) { + cfg := &Provide{} + require.NoError(t, ValidateProvideConfig(cfg)) + }) + + t.Run("invalid strategy", func(t *testing.T) { + cfg := &Provide{Strategy: NewOptionalString("bogus")} + err := ValidateProvideConfig(cfg) + require.Error(t, err) + assert.Contains(t, err.Error(), "Provide.Strategy") + }) + + t.Run("all combined with others", func(t *testing.T) { + cfg := &Provide{Strategy: NewOptionalString("all+pinned")} + err := ValidateProvideConfig(cfg) + require.Error(t, err) + assert.Contains(t, err.Error(), "cannot be combined") + }) } func TestValidateProvideConfig_Interval(t *testing.T) { @@ -70,6 +189,49 @@ func TestValidateProvideConfig_Interval(t *testing.T) { } } +func TestValidateProvideConfig_BloomFPRate(t *testing.T) { + tests := []struct { + name string + fpRate int64 + wantErr bool + errMsg string + }{ + {"valid default value", DefaultProvideBloomFPRate, false, ""}, + {"valid minimum (1M)", MinProvideBloomFPRate, false, ""}, + {"valid high (10M)", 10_000_000, false, ""}, + {"valid very high (100M)", 100_000_000, false, ""}, + {"invalid below minimum (999_999)", 999_999, true, "must be >="}, + {"invalid small (10_000)", 10_000, true, "must be >="}, + {"invalid one", 1, true, "must be >="}, + {"invalid zero", 0, true, "must be >="}, + {"invalid negative", -1, true, "must be >="}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := &Provide{ + BloomFPRate: 
NewOptionalInteger(tt.fpRate), + } + + err := ValidateProvideConfig(cfg) + + if tt.wantErr { + require.Error(t, err, "expected error for fpRate=%d", tt.fpRate) + if tt.errMsg != "" { + assert.Contains(t, err.Error(), tt.errMsg, "error message mismatch") + } + } else { + require.NoError(t, err, "unexpected error for fpRate=%d", tt.fpRate) + } + }) + } + + t.Run("default (nil) BloomFPRate is valid", func(t *testing.T) { + cfg := &Provide{} + require.NoError(t, ValidateProvideConfig(cfg)) + }) +} + func TestValidateProvideConfig_MaxWorkers(t *testing.T) { tests := []struct { name string diff --git a/core/commands/add.go b/core/commands/add.go index 2d47fa811ff..1cd063120ce 100644 --- a/core/commands/add.go +++ b/core/commands/add.go @@ -19,6 +19,7 @@ import ( mfs "github.com/ipfs/boxo/mfs" "github.com/ipfs/boxo/path" "github.com/ipfs/boxo/verifcid" + cid "github.com/ipfs/go-cid" cmds "github.com/ipfs/go-ipfs-cmds" ipld "github.com/ipfs/go-ipld-format" coreiface "github.com/ipfs/kubo/core/coreiface" @@ -68,6 +69,7 @@ const ( mtimeOptionName = "mtime" mtimeNsecsOptionName = "mtime-nsecs" fastProvideRootOptionName = "fast-provide-root" + fastProvideDAGOptionName = "fast-provide-dag" fastProvideWaitOptionName = "fast-provide-wait" emptyDirsOptionName = "empty-dirs" ) @@ -82,26 +84,41 @@ var AddCmd = &cmds.Command{ ShortDescription: ` Adds the content of to IPFS. Use -r to add directories (recursively). -FAST PROVIDE OPTIMIZATION: +CONTENT DISCOVERABILITY: -When you add content to IPFS, the sweep provider queues it for efficient -DHT provides over time. While this is resource-efficient, other peers won't -find your content immediately after 'ipfs add' completes. +How quickly other peers can find your content depends on Provide.Strategy: -To make sharing faster, 'ipfs add' does an immediate provide of the root CID -to the DHT in addition to the regular queue. 
This complements the sweep provider: -fast-provide handles the urgent case (root CIDs that users share and reference), -while the sweep provider efficiently provides all blocks according to -Provide.Strategy over time. + Provide.Strategy=all (default): + Every block is announced to the routing system as it is written to + the blockstore. Content is discoverable immediately. -By default, this immediate provide runs in the background without blocking -the command. If you need certainty that the root CID is discoverable before -the command returns (e.g., sharing a link immediately), use --fast-provide-wait -to wait for the provide to complete. Use --fast-provide-root=false to skip -this optimization. + Selective strategies (pinned, mfs, pinned+mfs): + Only the root CID is announced immediately after 'ipfs add'. + Remaining blocks are announced during the next reprovide cycle + (Provide.DHT.Interval, default 22h). -This works best with the sweep provider and accelerated DHT client. -Automatically skipped when DHT is not available. +FAST PROVIDE FLAGS: + + --fast-provide-root (default: enabled) + Announce the root CID to the routing system immediately after add, + in addition to the regular provide queue. Runs in the background + without blocking. Set to false to skip extra provides and minimize + network overhead when importing a lot of data at once. + + --fast-provide-dag (default: disabled) + Walk and provide the full DAG immediately after add, using the + active Provide.Strategy to determine scope. Useful with selective + strategies when all blocks need to be discoverable right away. + No effect with Provide.Strategy=all (blockstore already provides + every block on write). + + --fast-provide-wait (default: disabled) + Block until the immediate provide completes before returning. + Use when you need certainty that content is discoverable before + the command returns (e.g., sharing a link immediately after adding). 
+ +All fast-provide flags require an active DHT client. Skipped automatically +when only HTTP delegated routing is configured. `, LongDescription: ` Adds the content of to IPFS. Use -r to add directories. @@ -253,7 +270,7 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import // Advanced UnixFS Limits cmds.IntOption(maxFileLinksOptionName, "Limit the maximum number of links in UnixFS file nodes to this value. WARNING: experimental. Default: Import.UnixFSFileMaxLinks"), cmds.IntOption(maxDirectoryLinksOptionName, "Limit the maximum number of links in UnixFS basic directory nodes to this value. WARNING: experimental, Import.UnixFSHAMTDirectorySizeThreshold is safer. Default: Import.UnixFSDirectoryMaxLinks"), - cmds.IntOption(maxHAMTFanoutOptionName, "Limit the maximum number of links of a UnixFS HAMT directory node to this (power of 2, multiple of 8). WARNING: experimental, Import.UnixFSHAMTDirectorySizeThreshold is safer. Default: Import.UnixFSHAMTDirectoryMaxFanout"), + cmds.IntOption(maxHAMTFanoutOptionName, "Limit the maximum number of links of a UnixFS HAMT directory node to this (power of 2, between 8 and 1024). WARNING: experimental, Import.UnixFSHAMTDirectorySizeThreshold is safer. Default: Import.UnixFSHAMTDirectoryMaxFanout"), // Experimental Features cmds.BoolOption(inlineOptionName, "Inline small blocks into CIDs. WARNING: experimental"), cmds.IntOption(inlineLimitOptionName, fmt.Sprintf("Maximum block size to inline. Maximum: %d bytes. WARNING: experimental", verifcid.DefaultMaxIdentityDigestSize)).WithDefault(32), @@ -265,6 +282,7 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import cmds.Int64Option(mtimeOptionName, "Custom POSIX modification time to store in created UnixFS entries (seconds before or after the Unix Epoch). 
WARNING: experimental, forces dag-pb for root block, disables raw-leaves"), cmds.UintOption(mtimeNsecsOptionName, "Custom POSIX modification time (optional time fraction in nanoseconds)"), cmds.BoolOption(fastProvideRootOptionName, "Immediately provide root CID to DHT in addition to regular queue, for faster discovery. Default: Import.FastProvideRoot"), + cmds.BoolOption(fastProvideDAGOptionName, "Walk and provide the full DAG according to Provide.Strategy immediately after add. Default: Import.FastProvideDAG"), cmds.BoolOption(fastProvideWaitOptionName, "Block until the immediate provide completes before returning. Default: Import.FastProvideWait"), }, PreRun: func(req *cmds.Request, env cmds.Environment) error { @@ -338,6 +356,7 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import mtime, _ := req.Options[mtimeOptionName].(int64) mtimeNsecs, _ := req.Options[mtimeNsecsOptionName].(uint) fastProvideRoot, fastProvideRootSet := req.Options[fastProvideRootOptionName].(bool) + fastProvideDAG, fastProvideDAGSet := req.Options[fastProvideDAGOptionName].(bool) fastProvideWait, fastProvideWaitSet := req.Options[fastProvideWaitOptionName].(bool) emptyDirs, _ := req.Options[emptyDirsOptionName].(bool) @@ -390,8 +409,17 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import sizeEstimationMode = cfg.Import.HAMTSizeEstimationMode() fastProvideRoot = config.ResolveBoolFromConfig(fastProvideRoot, fastProvideRootSet, cfg.Import.FastProvideRoot, config.DefaultFastProvideRoot) + fastProvideDAG = config.ResolveBoolFromConfig(fastProvideDAG, fastProvideDAGSet, cfg.Import.FastProvideDAG, config.DefaultFastProvideDAG) fastProvideWait = config.ResolveBoolFromConfig(fastProvideWait, fastProvideWaitSet, cfg.Import.FastProvideWait, config.DefaultFastProvideWait) + // --only-hash does not store data, so pinning and providing + // are meaningless. 
+ if onlyHash { + dopin = false + fastProvideRoot = false + fastProvideDAG = false + } + // Storing optional mode or mtime (UnixFS 1.5) requires root block // to always be 'dag-pb' and not 'raw'. Below adjusts raw-leaves setting, if possible. if preserveMode || preserveMtime || mode != 0 || mtime != 0 { @@ -642,20 +670,34 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import return fmt.Errorf("expected a file argument") } - // Apply fast-provide-root if the flag is enabled - if fastProvideRoot && (lastRootCid != path.ImmutablePath{}) { + hasRoot := lastRootCid != path.ImmutablePath{} + + if fastProvideDAG && hasRoot { + // DAG walk includes the root CID (DFS pre-order emits it + // first), so a separate root provide is not needed. + cmdenv.ExecuteFastProvideDAG( + req.Context, + ipfsNode.Context(), + []cid.Cid{lastRootCid.RootCid()}, + ipfsNode.ProvidingStrategy, + ipfsNode.Blockstore, + ipfsNode.Provider, + fastProvideWait, + uint(cfg.Provide.BloomFPRate.WithDefault(config.DefaultProvideBloomFPRate)), + 0, // block count unknown here; bloom chain auto-grows + ) + } else if fastProvideRoot && hasRoot { cfg, err := ipfsNode.Repo.Config() if err != nil { return err } - if err := cmdenv.ExecuteFastProvide(req.Context, ipfsNode, cfg, lastRootCid.RootCid(), fastProvideWait, dopin, dopin, toFilesSet); err != nil { + if err := cmdenv.ExecuteFastProvideRoot(req.Context, ipfsNode, cfg, lastRootCid.RootCid(), fastProvideWait, dopin, dopin, toFilesSet); err != nil { return err } - } else if !fastProvideRoot { + } else if !fastProvideRoot && !fastProvideDAG { + log.Debugw("fast-provide-root: skipped", "reason", "disabled by flag or config") if fastProvideWait { - log.Debugw("fast-provide-root: skipped", "reason", "disabled by flag or config", "wait-flag-ignored", true) - } else { - log.Debugw("fast-provide-root: skipped", "reason", "disabled by flag or config") + log.Debugw("fast-provide-root: wait-flag-ignored") } } diff --git a/core/commands/bitswap.go 
b/core/commands/bitswap.go index 7bddaac60ac..1dab42ee376 100644 --- a/core/commands/bitswap.go +++ b/core/commands/bitswap.go @@ -80,7 +80,7 @@ Print out all blocks currently on the bitswap wantlist for the local peer.`, }, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *KeyList) error { - enc, err := cmdenv.GetLowLevelCidEncoder(req) + enc, err := cmdenv.GetCidEncoder(req) if err != nil { return err } @@ -128,7 +128,7 @@ var bitswapStatCmd = &cmds.Command{ }, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, s *bitswap.Stat) error { - enc, err := cmdenv.GetLowLevelCidEncoder(req) + enc, err := cmdenv.GetCidEncoder(req) if err != nil { return err } diff --git a/core/commands/block.go b/core/commands/block.go index 1402a8531fb..c34e54970ae 100644 --- a/core/commands/block.go +++ b/core/commands/block.go @@ -67,6 +67,11 @@ on raw IPFS blocks. It outputs the following to stdout: return err } + enc, err := cmdenv.GetCidEncoder(req) + if err != nil { + return err + } + p, err := cmdutils.PathOrCidPath(req.Arguments[0]) if err != nil { return err @@ -78,7 +83,7 @@ on raw IPFS blocks. It outputs the following to stdout: } return cmds.EmitOnce(res, &BlockStat{ - Key: b.Path().RootCid().String(), + Key: enc.Encode(b.Path().RootCid()), Size: b.Size(), }) }, @@ -171,6 +176,11 @@ only for backward compatibility when a legacy CIDv0 is required (--format=v0). return err } + enc, err := cmdenv.GetCidEncoder(req) + if err != nil { + return err + } + nd, err := cmdenv.GetNode(env) if err != nil { return err @@ -230,7 +240,7 @@ only for backward compatibility when a legacy CIDv0 is required (--format=v0). } err = res.Emit(&BlockStat{ - Key: p.Path().RootCid().String(), + Key: enc.Encode(p.Path().RootCid()), Size: p.Size(), }) if err != nil { @@ -280,6 +290,11 @@ It takes a list of CIDs to remove from the local datastore.. 
return err } + enc, err := cmdenv.GetCidEncoder(req) + if err != nil { + return err + } + force, _ := req.Options[forceOptionName].(bool) quiet, _ := req.Options[blockQuietOptionName].(bool) @@ -298,7 +313,7 @@ It takes a list of CIDs to remove from the local datastore.. err = api.Block().Rm(req.Context, rp, options.Block.Force(force)) if err != nil { if err := res.Emit(&removedBlock{ - Hash: rp.RootCid().String(), + Hash: enc.Encode(rp.RootCid()), Error: err.Error(), }); err != nil { return err @@ -308,7 +323,7 @@ It takes a list of CIDs to remove from the local datastore.. if !quiet { err := res.Emit(&removedBlock{ - Hash: rp.RootCid().String(), + Hash: enc.Encode(rp.RootCid()), }) if err != nil { return err diff --git a/core/commands/cid.go b/core/commands/cid.go index 4278b1ad347..a1540a359d1 100644 --- a/core/commands/cid.go +++ b/core/commands/cid.go @@ -2,6 +2,7 @@ package commands import ( "cmp" + "encoding/hex" "errors" "fmt" "io" @@ -14,6 +15,7 @@ import ( cidutil "github.com/ipfs/go-cidutil" cmds "github.com/ipfs/go-ipfs-cmds" ipldmulticodec "github.com/ipld/go-ipld-prime/multicodec" + peer "github.com/libp2p/go-libp2p/core/peer" mbase "github.com/multiformats/go-multibase" mc "github.com/multiformats/go-multicodec" mhash "github.com/multiformats/go-multihash" @@ -24,11 +26,12 @@ var CidCmd = &cmds.Command{ Tagline: "Convert and discover properties of CIDs", }, Subcommands: map[string]*cmds.Command{ - "format": cidFmtCmd, - "base32": base32Cmd, - "bases": basesCmd, - "codecs": codecsCmd, - "hashes": hashesCmd, + "inspect": inspectCmd, + "format": cidFmtCmd, + "base32": base32Cmd, + "bases": basesCmd, + "codecs": codecsCmd, + "hashes": hashesCmd, }, Extra: CreateCmdExtras(SetDoesNotUseRepo(true)), } @@ -46,6 +49,8 @@ var cidFmtCmd = &cmds.Command{ LongDescription: ` Format and converts 's in various useful ways. +For a human-readable breakdown of a CID, see 'ipfs cid inspect'. 
+ The optional format string is a printf style format string: ` + cidutil.FormatRef, }, @@ -400,6 +405,179 @@ var hashesCmd = &cmds.Command{ Extra: CreateCmdExtras(SetDoesNotUseRepo(true)), } +// CidInspectRes represents the response from the inspect command. +type CidInspectRes struct { + Cid string `json:"cid"` + Version int `json:"version"` + Multibase CidInspectBase `json:"multibase"` + Multicodec CidInspectCodec `json:"multicodec"` + Multihash CidInspectHash `json:"multihash"` + CidV0 string `json:"cidV0,omitempty"` + CidV1 string `json:"cidV1"` + ErrorMsg string `json:"errorMsg,omitempty"` +} + +type CidInspectBase struct { + Prefix string `json:"prefix"` + Name string `json:"name"` +} + +type CidInspectCodec struct { + Code uint64 `json:"code"` + Name string `json:"name"` +} + +type CidInspectHash struct { + Code uint64 `json:"code"` + Name string `json:"name"` + Length int `json:"length"` + Digest string `json:"digest"` +} + +var inspectCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Inspect and display detailed information about a CID.", + ShortDescription: ` +'ipfs cid inspect' breaks down a CID and displays its components: +- CID version (0 or 1) +- Multibase encoding (explicit for CIDv1, implicit for CIDv0) +- Multicodec (DAG type) +- Multihash (hash algorithm, length, and digest) +- Equivalent CIDv0 and CIDv1 representations + +For CIDv0, multibase, multicodec, and multihash are marked as +implicit because they are not explicitly encoded in the binary. + +If a PeerID string is provided instead of a CID, a helpful error +with the equivalent CID representation is returned. + +Use --enc=json for machine-readable output same as the HTTP RPC API. 
+`, + }, + Arguments: []cmds.Argument{ + cmds.StringArg("cid", true, false, "CID to inspect.").EnableStdin(), + }, + Run: func(req *cmds.Request, resp cmds.ResponseEmitter, env cmds.Environment) error { + cidStr := req.Arguments[0] + + c, err := cid.Decode(cidStr) + if err != nil { + errMsg := fmt.Sprintf("invalid CID: %s", err) + // PeerID fallback: try peer.Decode for legacy PeerIDs (12D3KooW..., Qm...) + if pid, pidErr := peer.Decode(cidStr); pidErr == nil { + pidCid := peer.ToCid(pid) + cidV1, _ := pidCid.StringOfBase(mbase.Base36) + errMsg += fmt.Sprintf("\nNote: the value is a PeerID; inspect its CID representation instead:\n %s", cidV1) + } + return cmds.EmitOnce(resp, &CidInspectRes{Cid: cidStr, ErrorMsg: errMsg}) + } + + res := &CidInspectRes{ + Cid: cidStr, + Version: int(c.Version()), + } + + // Multibase: always populated; CIDv0 uses implicit base58btc + if c.Version() == 0 { + res.Multibase = CidInspectBase{Prefix: "z", Name: "base58btc"} + } else { + baseCode, _ := cid.ExtractEncoding(cidStr) + res.Multibase = CidInspectBase{ + Prefix: string(rune(baseCode)), + Name: mbase.EncodingToStr[baseCode], + } + } + + // Multicodec + codecName := mc.Code(c.Type()).String() + if codecName == "" || strings.HasPrefix(codecName, "Code(") { + codecName = "unknown" + } + res.Multicodec = CidInspectCodec{Code: c.Type(), Name: codecName} + + // Multihash + dmh, err := mhash.Decode(c.Hash()) + if err != nil { + return cmds.EmitOnce(resp, &CidInspectRes{ + Cid: cidStr, + ErrorMsg: fmt.Sprintf("failed to decode multihash: %s", err), + }) + } + hashName := mhash.Codes[dmh.Code] + if hashName == "" { + hashName = "unknown" + } + res.Multihash = CidInspectHash{ + Code: dmh.Code, + Name: hashName, + Length: dmh.Length, + Digest: hex.EncodeToString(dmh.Digest), + } + + // CIDv0: only possible with dag-pb + sha2-256-256 + if c.Type() == cid.DagProtobuf && dmh.Code == mhash.SHA2_256 && dmh.Length == 32 { + res.CidV0 = cid.NewCidV0(c.Hash()).String() + } + + // CIDv1: use base36 
for libp2p-key, base32 for everything else + v1 := cid.NewCidV1(c.Type(), c.Hash()) + v1Base := mbase.Encoding(mbase.Base32) + if c.Type() == uint64(mc.Libp2pKey) { + v1Base = mbase.Base36 + } + v1Str, err := v1.StringOfBase(v1Base) + if err != nil { + v1Str = v1.String() + } + res.CidV1 = v1Str + + return cmds.EmitOnce(resp, res) + }, + Encoders: cmds.EncoderMap{ + cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, res *CidInspectRes) error { + if res.ErrorMsg != "" { + return fmt.Errorf("%s", res.ErrorMsg) + } + + implicit := "" + if res.Version == 0 { + implicit = ", implicit" + } + + fmt.Fprintf(w, "CID: %s\n", res.Cid) + fmt.Fprintf(w, "Version: %d\n", res.Version) + if res.Version == 0 { + fmt.Fprintf(w, "Multibase: %s (implicit)\n", res.Multibase.Name) + } else { + fmt.Fprintf(w, "Multibase: %s (%s)\n", res.Multibase.Name, res.Multibase.Prefix) + } + fmt.Fprintf(w, "Multicodec: %s (0x%x%s)\n", res.Multicodec.Name, res.Multicodec.Code, implicit) + fmt.Fprintf(w, "Multihash: %s (0x%x%s)\n", res.Multihash.Name, res.Multihash.Code, implicit) + fmt.Fprintf(w, " Length: %d bytes\n", res.Multihash.Length) + fmt.Fprintf(w, " Digest: %s\n", res.Multihash.Digest) + + if res.CidV0 != "" { + fmt.Fprintf(w, "CIDv0: %s\n", res.CidV0) + } else if res.Multicodec.Code != cid.DagProtobuf { + fmt.Fprintf(w, "CIDv0: not possible, requires dag-pb (0x70), got %s (0x%x)\n", + res.Multicodec.Name, res.Multicodec.Code) + } else if res.Multihash.Code != mhash.SHA2_256 { + fmt.Fprintf(w, "CIDv0: not possible, requires sha2-256 (0x12), got %s (0x%x)\n", + res.Multihash.Name, res.Multihash.Code) + } else if res.Multihash.Length != 32 { + fmt.Fprintf(w, "CIDv0: not possible, requires 32-byte digest, got %d\n", + res.Multihash.Length) + } + + fmt.Fprintf(w, "CIDv1: %s\n", res.CidV1) + + return nil + }), + }, + Type: CidInspectRes{}, + Extra: CreateCmdExtras(SetDoesNotUseRepo(true)), +} + type multibaseSorter struct { data []CodeAndName } diff --git 
a/core/commands/cmdenv/cidbase.go b/core/commands/cmdenv/cidbase.go index 926cd24a105..0df0fc8691a 100644 --- a/core/commands/cmdenv/cidbase.go +++ b/core/commands/cmdenv/cidbase.go @@ -11,24 +11,20 @@ import ( ) var ( - OptionCidBase = cmds.StringOption("cid-base", "Multibase encoding used for version 1 CIDs in output.") - OptionUpgradeCidV0InOutput = cmds.BoolOption("upgrade-cidv0-in-output", "Upgrade version 0 to version 1 CIDs in output.") + OptionCidBase = cmds.StringOption("cid-base", "Multibase encoding for CIDs in output. CIDv0 is automatically converted to CIDv1 when a base other than base58btc is specified.") + + // OptionUpgradeCidV0InOutput is deprecated. When --cid-base is set to + // anything other than base58btc, CIDv0 are now automatically upgraded + // to CIDv1. This flag is kept for backward compatibility and will be + // removed in a future release. + OptionUpgradeCidV0InOutput = cmds.BoolOption("upgrade-cidv0-in-output", "[DEPRECATED] Upgrade version 0 to version 1 CIDs in output.") ) -// GetCidEncoder processes the `cid-base` and `output-cidv1` options and -// returns an encoder to use based on those parameters. +// GetCidEncoder processes the --cid-base option and returns an encoder. +// When --cid-base is set to a non-base58btc encoding, CIDv0 values are +// automatically upgraded to CIDv1 because CIDv0 can only be represented +// in base58btc. func GetCidEncoder(req *cmds.Request) (cidenc.Encoder, error) { - return getCidBase(req, true) -} - -// GetLowLevelCidEncoder is like GetCidEncoder but meant to be used by lower -// level commands. It differs from GetCidEncoder in that CIDv0 are not, by -// default, auto-upgraded to CIDv1. 
-func GetLowLevelCidEncoder(req *cmds.Request) (cidenc.Encoder, error) { - return getCidBase(req, false) -} - -func getCidBase(req *cmds.Request, autoUpgrade bool) (cidenc.Encoder, error) { base, _ := req.Options[OptionCidBase.Name()].(string) upgrade, upgradeDefined := req.Options[OptionUpgradeCidV0InOutput.Name()].(bool) @@ -40,11 +36,16 @@ func getCidBase(req *cmds.Request, autoUpgrade bool) (cidenc.Encoder, error) { if err != nil { return e, err } - if autoUpgrade { + // CIDv0 can only be represented in base58btc. When any other + // base is requested, always upgrade CIDv0 to CIDv1 so the + // output actually uses the requested encoding. + if e.Base.Encoding() != mbase.Base58BTC { e.Upgrade = true } } + // Deprecated: --upgrade-cidv0-in-output still works as an explicit + // override for backward compatibility. if upgradeDefined { e.Upgrade = upgrade } diff --git a/core/commands/cmdenv/cidbase_test.go b/core/commands/cmdenv/cidbase_test.go index f1dd22a52e1..7484ce3a7cb 100644 --- a/core/commands/cmdenv/cidbase_test.go +++ b/core/commands/cmdenv/cidbase_test.go @@ -4,9 +4,82 @@ import ( "testing" cidenc "github.com/ipfs/go-cidutil/cidenc" + cmds "github.com/ipfs/go-ipfs-cmds" mbase "github.com/multiformats/go-multibase" ) +func TestGetCidEncoder(t *testing.T) { + makeReq := func(opts map[string]any) *cmds.Request { + if opts == nil { + opts = map[string]any{} + } + return &cmds.Request{Options: opts} + } + + t.Run("no options returns default encoder", func(t *testing.T) { + enc, err := GetCidEncoder(makeReq(nil)) + if err != nil { + t.Fatal(err) + } + if enc.Upgrade { + t.Error("expected Upgrade=false with no options") + } + }) + + t.Run("non-base58btc base auto-upgrades CIDv0", func(t *testing.T) { + enc, err := GetCidEncoder(makeReq(map[string]any{ + "cid-base": "base32", + })) + if err != nil { + t.Fatal(err) + } + if !enc.Upgrade { + t.Error("expected Upgrade=true for base32") + } + if enc.Base.Encoding() != mbase.Base32 { + t.Errorf("expected base32 
encoding, got %v", enc.Base.Encoding()) + } + }) + + t.Run("base58btc does not auto-upgrade", func(t *testing.T) { + enc, err := GetCidEncoder(makeReq(map[string]any{ + "cid-base": "base58btc", + })) + if err != nil { + t.Fatal(err) + } + if enc.Upgrade { + t.Error("expected Upgrade=false for base58btc") + } + }) + + t.Run("deprecated flag still works as override", func(t *testing.T) { + // Explicitly disable upgrade even with non-base58btc base + enc, err := GetCidEncoder(makeReq(map[string]any{ + "cid-base": "base32", + "upgrade-cidv0-in-output": false, + })) + if err != nil { + t.Fatal(err) + } + if enc.Upgrade { + t.Error("expected Upgrade=false when explicitly disabled") + } + + // Explicitly enable upgrade even with base58btc + enc, err = GetCidEncoder(makeReq(map[string]any{ + "cid-base": "base58btc", + "upgrade-cidv0-in-output": true, + })) + if err != nil { + t.Fatal(err) + } + if !enc.Upgrade { + t.Error("expected Upgrade=true when explicitly enabled") + } + }) +} + func TestEncoderFromPath(t *testing.T) { test := func(path string, expected cidenc.Encoder) { actual, err := CidEncoderFromPath(path) diff --git a/core/commands/cmdenv/env.go b/core/commands/cmdenv/env.go index ed7611975ca..2b601b9ffa9 100644 --- a/core/commands/cmdenv/env.go +++ b/core/commands/cmdenv/env.go @@ -6,16 +6,18 @@ import ( "strconv" "strings" + "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/dag/walker" "github.com/ipfs/go-cid" cmds "github.com/ipfs/go-ipfs-cmds" logging "github.com/ipfs/go-log/v2" - routing "github.com/libp2p/go-libp2p/core/routing" - "github.com/ipfs/kubo/commands" "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/core" coreiface "github.com/ipfs/kubo/core/coreiface" options "github.com/ipfs/kubo/core/coreiface/options" + "github.com/ipfs/kubo/core/node" + routing "github.com/libp2p/go-libp2p/core/routing" ) var log = logging.Logger("core/commands/cmdenv") @@ -107,7 +109,7 @@ func provideCIDSync(ctx context.Context, router routing.Routing, c cid.Cid) 
erro return router.Provide(ctx, c, true) } -// ExecuteFastProvide immediately provides a root CID to the DHT, bypassing the regular +// ExecuteFastProvideRoot immediately provides a root CID to the DHT, bypassing the regular // provide queue for faster content discovery. This function is reusable across commands // that add or import content, such as ipfs add and ipfs dag import. // @@ -129,7 +131,7 @@ func provideCIDSync(ctx context.Context, router routing.Routing, c cid.Cid) erro // The function handles all precondition checks (Provide.Enabled, DHT availability, // strategy matching) and logs appropriately. In async mode, it launches a goroutine // with a detached context and timeout. -func ExecuteFastProvide( +func ExecuteFastProvideRoot( ctx context.Context, ipfsNode *core.IpfsNode, cfg *config.Config, @@ -156,7 +158,7 @@ func ExecuteFastProvide( // Check if strategy allows providing this content strategyStr := cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy) - strategy := config.ParseProvideStrategy(strategyStr) + strategy := config.MustParseProvideStrategy(strategyStr) shouldProvide := config.ShouldProvideForStrategy(strategy, isPinned, isPinnedRoot, isMFS) if !shouldProvide { @@ -176,11 +178,14 @@ func ExecuteFastProvide( return nil } - // Asynchronous mode (default): fire-and-forget, don't block, always return nil + // Asynchronous mode (default): fire-and-forget, don't block, always return nil. + // Parent off the node's lifetime context (not context.Background) so the + // goroutine cancels on daemon shutdown instead of potentially outliving + // the node and touching a closed DHT client. The timeout still bounds + // stuck DHT operations. 
log.Debugw("fast-provide-root: providing asynchronously", "cid", rootCid) go func() { - // Use detached context with timeout to prevent hanging on network issues - ctx, cancel := context.WithTimeout(context.Background(), config.DefaultFastProvideTimeout) + ctx, cancel := context.WithTimeout(ipfsNode.Context(), config.DefaultFastProvideTimeout) defer cancel() if err := provideCIDSync(ctx, ipfsNode.DHTClient, rootCid); err != nil { log.Warnw("fast-provide-root: async provide failed", "cid", rootCid, "error", err) @@ -190,3 +195,106 @@ func ExecuteFastProvide( }() return nil } + +// ExecuteFastProvideDAG walks the DAGs rooted at roots and provides +// CIDs according to the active Provide.Strategy. A single bloom +// tracker is shared across all roots so shared sub-DAGs are +// deduplicated. Uses an unbuffered channel for backpressure. +// +// Context handling: +// - wait=true: the walk runs inline under cmdCtx (the request +// context), so a user Ctrl+C on the command cancels the walk. +// - wait=false: the walk runs in a background goroutine under +// nodeCtx (the IpfsNode lifetime context). This lets the walk +// survive the command handler returning (go-ipfs-cmds cancels +// req.Context on handler exit) while still being cancelled on +// daemon shutdown, so the goroutine does not outlive the node +// and keep the blockstore/provider pinned open. +// +// fpRate is the bloom filter target false-positive rate (1/N), normally +// resolved from cfg.Provide.BloomFPRate by the caller. +// blockCount sizes the bloom filter (pass 0 if unknown). 
+func ExecuteFastProvideDAG( + cmdCtx context.Context, + nodeCtx context.Context, + roots []cid.Cid, + strategy config.ProvideStrategy, + bs blockstore.Blockstore, + prov node.DHTProvider, + wait bool, + fpRate uint, + blockCount uint, +) { + if len(roots) == 0 { + return + } + if (strategy&config.ProvideStrategyPinned) == 0 && + (strategy&config.ProvideStrategyMFS) == 0 { + return + } + + do := func(ctx context.Context) { + expectedItems := max(uint(walker.DefaultBloomInitialCapacity), blockCount) + tracker, err := walker.NewBloomTracker(expectedItems, fpRate) + if err != nil { + log.Errorf("fast-provide-dag: bloom tracker: %s", err) + return + } + + ch := make(chan cid.Cid) // unbuffered for backpressure + done := make(chan struct{}) + go func() { + defer close(done) + for c := range ch { + if err := prov.StartProviding(false, c.Hash()); err != nil { + log.Errorf("fast-provide-dag: %s: %s", c, err) + } + } + }() + + emit := func(c cid.Cid) bool { + select { + case ch <- c: + return true + case <-ctx.Done(): + return false + } + } + + opts := []walker.Option{walker.WithVisitedTracker(tracker)} + useEntities := strategy&config.ProvideStrategyEntities != 0 + + if useEntities { + fetch := walker.NodeFetcherFromBlockstore(bs) + for _, root := range roots { + if ctx.Err() != nil { + break + } + _ = walker.WalkEntityRoots(ctx, root, fetch, emit, opts...) + } + } else { + fetch := walker.LinksFetcherFromBlockstore(bs) + for _, root := range roots { + if ctx.Err() != nil { + break + } + _ = walker.WalkDAG(ctx, root, fetch, emit, opts...) + } + } + + close(ch) + <-done + log.Infow("fast-provide-dag: finished", + "providedCIDs", tracker.Count(), + "skippedBranches", tracker.Deduplicated()) + } + + if wait { + do(cmdCtx) + } else { + // Use the node's lifetime context so the walk survives + // the command handler returning (which cancels req.Context) + // but still cancels on daemon shutdown. 
+ go do(nodeCtx) + } +} diff --git a/core/commands/commands_test.go b/core/commands/commands_test.go index 04ee581e0a1..1326f5eb69c 100644 --- a/core/commands/commands_test.go +++ b/core/commands/commands_test.go @@ -40,6 +40,7 @@ func TestCommands(t *testing.T) { "/cid/codecs", "/cid/format", "/cid/hashes", + "/cid/inspect", "/commands", "/commands/completion", "/commands/completion/bash", @@ -79,6 +80,7 @@ func TestCommands(t *testing.T) { "/diag/datastore", "/diag/datastore/count", "/diag/datastore/get", + "/diag/datastore/put", "/diag/profile", "/diag/sys", "/files", @@ -213,6 +215,11 @@ func TestCommands(t *testing.T) { "/swarm/peering/rm", "/swarm/resources", "/update", + "/update/check", + "/update/clean", + "/update/install", + "/update/revert", + "/update/versions", "/version", "/version/check", "/version/deps", diff --git a/core/commands/dag/dag.go b/core/commands/dag/dag.go index a256213ecd0..0e737ec6c11 100644 --- a/core/commands/dag/dag.go +++ b/core/commands/dag/dag.go @@ -22,6 +22,7 @@ const ( silentOptionName = "silent" statsOptionName = "stats" fastProvideRootOptionName = "fast-provide-root" + fastProvideDAGOptionName = "fast-provide-dag" fastProvideWaitOptionName = "fast-provide-wait" ) @@ -97,7 +98,7 @@ into an object of the specified format. Type: OutputObject{}, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *OutputObject) error { - enc, err := cmdenv.GetLowLevelCidEncoder(req) + enc, err := cmdenv.GetCidEncoder(req) if err != nil { return err } @@ -153,7 +154,7 @@ var DagResolveCmd = &cmds.Command{ // Nope, fallback on the default. 
fallthrough default: - enc, err = cmdenv.GetLowLevelCidEncoder(req) + enc, err = cmdenv.GetCidEncoder(req) if err != nil { return err } @@ -216,6 +217,7 @@ Specification of CAR formats: https://ipld.io/specs/transport/car/ cmds.BoolOption(silentOptionName, "No output."), cmds.BoolOption(statsOptionName, "Output stats."), cmds.BoolOption(fastProvideRootOptionName, "Immediately provide root CIDs to DHT in addition to regular queue, for faster discovery. Default: Import.FastProvideRoot"), + cmds.BoolOption(fastProvideDAGOptionName, "Walk and provide the full DAG according to Provide.Strategy after import. Default: Import.FastProvideDAG"), cmds.BoolOption(fastProvideWaitOptionName, "Block until the immediate provide completes before returning. Default: Import.FastProvideWait"), cmdutils.AllowBigBlockOption, }, @@ -244,7 +246,7 @@ Specification of CAR formats: https://ipld.io/specs/transport/car/ return fmt.Errorf("unexpected message from DAG import") } - enc, err := cmdenv.GetLowLevelCidEncoder(req) + enc, err := cmdenv.GetCidEncoder(req) if err != nil { return err } @@ -292,57 +294,15 @@ CAR file follows the CARv1 format: https://ipld.io/specs/transport/car/carv1/ }, } -// DagStat is a dag stat command response +// DagStat is a dag stat command response. Cid is stored as a +// pre-encoded string (via GetCidEncoder in the Run handler) so that +// --cid-base is respected and no custom MarshalJSON is needed. type DagStat struct { - Cid cid.Cid + Cid string `json:"Cid"` Size uint64 `json:",omitempty"` NumBlocks int64 `json:",omitempty"` } -func (s *DagStat) String() string { - return fmt.Sprintf("%s %d %d", s.Cid.String()[:20], s.Size, s.NumBlocks) -} - -func (s *DagStat) MarshalJSON() ([]byte, error) { - type Alias DagStat - /* - We can't rely on cid.Cid.MarshalJSON since it uses the {"/": "..."} - format. 
To make the output consistent and follow the Kubo API patterns - we use the Cid.String method - */ - return json.Marshal(struct { - Cid string `json:"Cid"` - *Alias - }{ - Cid: s.Cid.String(), - Alias: (*Alias)(s), - }) -} - -func (s *DagStat) UnmarshalJSON(data []byte) error { - /* - We can't rely on cid.Cid.UnmarshalJSON since it uses the {"/": "..."} - format. To make the output consistent and follow the Kubo API patterns - we use the Cid.Parse method - */ - type Alias DagStat - aux := struct { - Cid string `json:"Cid"` - *Alias - }{ - Alias: (*Alias)(s), - } - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - Cid, err := cid.Parse(aux.Cid) - if err != nil { - return err - } - s.Cid = Cid - return nil -} - type DagStatSummary struct { redundantSize uint64 `json:"-"` UniqueBlocks int `json:",omitempty"` @@ -404,7 +364,7 @@ Note: This command skips duplicate blocks in reporting both size and the number fmt.Fprintln(w) csvWriter := csv.NewWriter(w) csvWriter.Comma = '\t' - cidSpacing := len(event.DagStatsArray[0].Cid.String()) + cidSpacing := len(event.DagStatsArray[0].Cid) header := []string{fmt.Sprintf("%-*s", cidSpacing, "CID"), fmt.Sprintf("%-15s", "Blocks"), "Size"} if err := csvWriter.Write(header); err != nil { return err @@ -412,7 +372,7 @@ Note: This command skips duplicate blocks in reporting both size and the number for _, dagStat := range event.DagStatsArray { numBlocksStr := fmt.Sprint(dagStat.NumBlocks) err := csvWriter.Write([]string{ - dagStat.Cid.String(), + dagStat.Cid, fmt.Sprintf("%-15s", numBlocksStr), fmt.Sprint(dagStat.Size), }) @@ -432,7 +392,6 @@ Note: This command skips duplicate blocks in reporting both size and the number }), cmds.JSON: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, event *DagStatSummary) error { return json.NewEncoder(w).Encode(event) - }, - ), + }), }, } diff --git a/core/commands/dag/import.go b/core/commands/dag/import.go index 032b9e52a6c..472533be3c1 100644 --- 
a/core/commands/dag/import.go +++ b/core/commands/dag/import.go @@ -51,9 +51,11 @@ func dagImport(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment doPinRoots, _ := req.Options[pinRootsOptionName].(bool) fastProvideRoot, fastProvideRootSet := req.Options[fastProvideRootOptionName].(bool) + fastProvideDAG, fastProvideDAGSet := req.Options[fastProvideDAGOptionName].(bool) fastProvideWait, fastProvideWaitSet := req.Options[fastProvideWaitOptionName].(bool) fastProvideRoot = config.ResolveBoolFromConfig(fastProvideRoot, fastProvideRootSet, cfg.Import.FastProvideRoot, config.DefaultFastProvideRoot) + fastProvideDAG = config.ResolveBoolFromConfig(fastProvideDAG, fastProvideDAGSet, cfg.Import.FastProvideDAG, config.DefaultFastProvideDAG) fastProvideWait = config.ResolveBoolFromConfig(fastProvideWait, fastProvideWaitSet, cfg.Import.FastProvideWait, config.DefaultFastProvideWait) // grab a pinlock ( which doubles as a GC lock ) so that regardless of the @@ -113,7 +115,17 @@ func dagImport(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment var previous blocks.Block - car, err := gocarv2.NewBlockReader(file) + // Wrap the file to hide the io.Seeker interface. + // Over the HTTP API the underlying reader is a multipart stream + // that cannot seek, but boxo's ReaderFile advertises io.Seeker + // anyway and returns ErrNotSupported at runtime. Hiding the + // interface lets go-car fall back to sequential (forward-only) + // reading, which is all that CARv2 streaming needs. + // See https://github.com/ipfs/kubo/issues/9361 + car, err := gocarv2.NewBlockReader(struct { + io.Reader + io.Closer + }{file, file}) if err != nil { return err } @@ -200,20 +212,34 @@ func dagImport(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment } } - // Fast-provide roots for faster discovery - if fastProvideRoot { + // Provide imported content for faster discovery. + // DAG walk supersedes root-only (root is included in the walk). 
+ if fastProvideDAG { + var rootCIDs []cid.Cid + _ = roots.ForEach(func(c cid.Cid) error { + rootCIDs = append(rootCIDs, c) + return nil + }) + cmdenv.ExecuteFastProvideDAG( + req.Context, + node.Context(), + rootCIDs, + node.ProvidingStrategy, + node.Blockstore, + node.Provider, + fastProvideWait, + uint(cfg.Provide.BloomFPRate.WithDefault(config.DefaultProvideBloomFPRate)), + 0, // block count unknown; bloom chain auto-grows + ) + } else if fastProvideRoot { err = roots.ForEach(func(c cid.Cid) error { - return cmdenv.ExecuteFastProvide(req.Context, node, cfg, c, fastProvideWait, doPinRoots, doPinRoots, false) + return cmdenv.ExecuteFastProvideRoot(req.Context, node, cfg, c, fastProvideWait, doPinRoots, doPinRoots, false) }) if err != nil { return err } } else { - if fastProvideWait { - log.Debugw("fast-provide-root: skipped", "reason", "disabled by flag or config", "wait-flag-ignored", true) - } else { - log.Debugw("fast-provide-root: skipped", "reason", "disabled by flag or config") - } + log.Debugw("fast-provide-root: skipped", "reason", "disabled by flag or config") } return nil diff --git a/core/commands/dag/stat.go b/core/commands/dag/stat.go index 916aae71a6b..0ce1e4246f4 100644 --- a/core/commands/dag/stat.go +++ b/core/commands/dag/stat.go @@ -29,6 +29,12 @@ func dagStat(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) if err != nil { return err } + + enc, err := cmdenv.GetCidEncoder(req) + if err != nil { + return err + } + nodeGetter := mdag.NewSession(req.Context, api.Dag()) cidSet := cid.NewSet() @@ -50,7 +56,7 @@ func dagStat(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) if err != nil { return err } - dagstats := &DagStat{Cid: rp.RootCid()} + dagstats := &DagStat{Cid: enc.Encode(rp.RootCid())} dagStatSummary.appendStats(dagstats) err = traverse.Traverse(obj, traverse.Options{ DAG: nodeGetter, diff --git a/core/commands/diag.go b/core/commands/diag.go index 777e9445fb3..c8a48e90c58 100644 --- 
a/core/commands/diag.go +++ b/core/commands/diag.go @@ -7,9 +7,11 @@ import ( "io" "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/mount" "github.com/ipfs/go-datastore/query" cmds "github.com/ipfs/go-ipfs-cmds" oldcmds "github.com/ipfs/kubo/commands" + node "github.com/ipfs/kubo/core/node" fsrepo "github.com/ipfs/kubo/repo/fsrepo" ) @@ -41,7 +43,11 @@ in production workflows. The datastore format may change between versions. The daemon must not be running when calling these commands. -EXAMPLE +When the provider keystore datastores exist on disk (nodes with +Provide.DHT.SweepEnabled=true), they are automatically mounted into the +datastore view under /provider/keystore/0/ and /provider/keystore/1/. + +EXAMPLES Inspecting pubsub seqno validator state: @@ -51,10 +57,20 @@ Inspecting pubsub seqno validator state: Key: /pubsub/seqno/12D3KooW... Hex Dump: 00000000 18 81 81 c8 91 c0 ea f6 |........| + +Writing a test key (debugging only): + + $ ipfs diag datastore put /test/mykey "hello" + +Inspecting provider keystore (requires SweepEnabled): + + $ ipfs diag datastore count /provider/keystore/0/ + $ ipfs diag datastore count /provider/keystore/1/ `, }, Subcommands: map[string]*cmds.Command{ "get": diagDatastoreGetCmd, + "put": diagDatastorePutCmd, "count": diagDatastoreCountCmd, }, } @@ -67,6 +83,36 @@ type diagDatastoreGetResult struct { HexDump string `json:"hex_dump,omitempty"` } +// openDiagDatastore opens the repo datastore and conditionally mounts any +// provider keystore datastores that exist on disk. It returns the composite +// datastore and a cleanup function that must be called when done. 
+func openDiagDatastore(env cmds.Environment) (datastore.Datastore, func(), error) { + cctx := env.(*oldcmds.Context) + repo, err := fsrepo.Open(cctx.ConfigRoot) + if err != nil { + return nil, nil, fmt.Errorf("failed to open repo: %w", err) + } + + extraMounts, extraCloser, err := node.MountKeystoreDatastores(repo) + if err != nil { + repo.Close() + return nil, nil, err + } + + closer := func() { + extraCloser() + repo.Close() + } + + if len(extraMounts) == 0 { + return repo.Datastore(), closer, nil + } + + mounts := []mount.Mount{{Prefix: datastore.NewKey("/"), Datastore: repo.Datastore()}} + mounts = append(mounts, extraMounts...) + return mount.New(mounts), closer, nil +} + var diagDatastoreGetCmd = &cmds.Command{ Status: cmds.Experimental, Helptext: cmds.HelpText{ @@ -89,16 +135,14 @@ WARNING: FOR DEBUGGING/TESTING ONLY NoRemote: true, PreRun: DaemonNotRunning, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - cctx := env.(*oldcmds.Context) - repo, err := fsrepo.Open(cctx.ConfigRoot) + ds, closer, err := openDiagDatastore(env) if err != nil { - return fmt.Errorf("failed to open repo: %w", err) + return err } - defer repo.Close() + defer closer() keyStr := req.Arguments[0] key := datastore.NewKey(keyStr) - ds := repo.Datastore() val, err := ds.Get(req.Context, key) if err != nil { @@ -133,6 +177,42 @@ WARNING: FOR DEBUGGING/TESTING ONLY }, } +var diagDatastorePutCmd = &cmds.Command{ + Status: cmds.Experimental, + Helptext: cmds.HelpText{ + Tagline: "Write a raw key-value pair to the datastore.", + ShortDescription: ` +Stores the given value at the specified datastore key. + +The daemon must not be running when using this command. 
+ +WARNING: FOR DEBUGGING/TESTING ONLY +`, + }, + Arguments: []cmds.Argument{ + cmds.StringArg("key", true, false, "Datastore key (e.g., /test/mykey)"), + cmds.StringArg("value", true, false, "Value to store (as a string)"), + }, + NoRemote: true, + PreRun: DaemonNotRunning, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + ds, closer, err := openDiagDatastore(env) + if err != nil { + return err + } + defer closer() + + key := datastore.NewKey(req.Arguments[0]) + if err := ds.Put(req.Context, key, []byte(req.Arguments[1])); err != nil { + return fmt.Errorf("failed to put key: %w", err) + } + if err := ds.Sync(req.Context, key); err != nil { + return fmt.Errorf("failed to sync: %w", err) + } + return nil + }, +} + type diagDatastoreCountResult struct { Prefix string `json:"prefix"` Count int64 `json:"count"` @@ -156,15 +236,13 @@ WARNING: FOR DEBUGGING/TESTING ONLY NoRemote: true, PreRun: DaemonNotRunning, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - cctx := env.(*oldcmds.Context) - repo, err := fsrepo.Open(cctx.ConfigRoot) + ds, closer, err := openDiagDatastore(env) if err != nil { - return fmt.Errorf("failed to open repo: %w", err) + return err } - defer repo.Close() + defer closer() prefix := req.Arguments[0] - ds := repo.Datastore() q := query.Query{ Prefix: prefix, diff --git a/core/commands/files.go b/core/commands/files.go index d9ab9e980f8..c47fef53354 100644 --- a/core/commands/files.go +++ b/core/commands/files.go @@ -28,7 +28,6 @@ import ( offline "github.com/ipfs/boxo/exchange/offline" dag "github.com/ipfs/boxo/ipld/merkledag" ft "github.com/ipfs/boxo/ipld/unixfs" - uio "github.com/ipfs/boxo/ipld/unixfs/io" mfs "github.com/ipfs/boxo/mfs" "github.com/ipfs/boxo/path" cid "github.com/ipfs/go-cid" @@ -505,7 +504,7 @@ being GC'ed. 
return err } - prefix, err := getPrefixNew(req, &cfg.Import) + prefix, err := getPrefix(req, &cfg.Import) if err != nil { return err } @@ -558,7 +557,11 @@ being GC'ed. if mkParents { maxDirLinks := int(cfg.Import.UnixFSDirectoryMaxLinks.WithDefault(config.DefaultUnixFSDirectoryMaxLinks)) sizeEstimationMode := cfg.Import.HAMTSizeEstimationMode() - err := ensureContainingDirectoryExists(nd.FilesRoot, dst, prefix, maxDirLinks, &sizeEstimationMode) + err := ensureContainingDirectoryExists(nd.FilesRoot, dst, + mfs.WithCidBuilder(prefix), + mfs.WithMaxLinks(maxDirLinks), + mfs.WithSizeEstimationMode(sizeEstimationMode), + ) if err != nil { return err } @@ -1060,7 +1063,7 @@ See '--to-files' in 'ipfs add --help' for more information. rawLeaves = cfg.Import.UnixFSRawLeaves.WithDefault(config.DefaultUnixFSRawLeaves) } - prefix, err := getPrefixNew(req, &cfg.Import) + prefix, err := getPrefix(req, &cfg.Import) if err != nil { return err } @@ -1073,7 +1076,11 @@ See '--to-files' in 'ipfs add --help' for more information. 
if mkParents { maxDirLinks := int(cfg.Import.UnixFSDirectoryMaxLinks.WithDefault(config.DefaultUnixFSDirectoryMaxLinks)) sizeEstimationMode := cfg.Import.HAMTSizeEstimationMode() - err := ensureContainingDirectoryExists(nd.FilesRoot, path, prefix, maxDirLinks, &sizeEstimationMode) + err := ensureContainingDirectoryExists(nd.FilesRoot, path, + mfs.WithCidBuilder(prefix), + mfs.WithMaxLinks(maxDirLinks), + mfs.WithSizeEstimationMode(sizeEstimationMode), + ) if err != nil { return err } @@ -1203,13 +1210,11 @@ Examples: maxDirLinks := int(cfg.Import.UnixFSDirectoryMaxLinks.WithDefault(config.DefaultUnixFSDirectoryMaxLinks)) sizeEstimationMode := cfg.Import.HAMTSizeEstimationMode() - err = mfs.Mkdir(root, dirtomake, mfs.MkdirOpts{ - Mkparents: dashp, - Flush: flush, - CidBuilder: prefix, - MaxLinks: maxDirLinks, - SizeEstimationMode: &sizeEstimationMode, - }) + err = mfs.Mkdir(root, dirtomake, mfs.MkdirOpts{Mkparents: dashp, Flush: flush}, + mfs.WithCidBuilder(prefix), + mfs.WithMaxLinks(maxDirLinks), + mfs.WithSizeEstimationMode(sizeEstimationMode), + ) return err }, @@ -1264,10 +1269,15 @@ var filesChcidCmd = &cmds.Command{ Tagline: "Change the CID version or hash function of the root node of a given path.", ShortDescription: ` Change the CID version or hash function of the root node of a given path. + +Note: the MFS root ('/') CID format is controlled by Import.CidVersion and +Import.HashFunction in the config and cannot be changed with this command. +Use 'ipfs config' to modify these values instead. This command only works +on subdirectories of the MFS root. `, }, Arguments: []cmds.Argument{ - cmds.StringArg("path", false, false, "Path to change. Default: '/'."), + cmds.StringArg("path", true, false, "Path to change (must not be '/')."), }, Options: []cmds.Option{ cidVersionOption, @@ -1279,9 +1289,10 @@ Change the CID version or hash function of the root node of a given path. 
return err } - path := "/" - if len(req.Arguments) > 0 { - path = req.Arguments[0] + path := req.Arguments[0] + if path == "/" { + return fmt.Errorf("cannot change CID format of the MFS root; " + + "use 'ipfs config Import.CidVersion' and 'ipfs config Import.HashFunction' instead") } flush, _ := req.Options[filesFlushOptionName].(bool) @@ -1446,97 +1457,48 @@ func removePath(filesRoot *mfs.Root, path string, force bool, dashr bool) error return pdir.Flush() } -func getPrefixNew(req *cmds.Request, importCfg *config.Import) (cid.Builder, error) { - cidVer, cidVerSet := req.Options[filesCidVersionOptionName].(int) - hashFunStr, hashFunSet := req.Options[filesHashOptionName].(string) - - // Fall back to Import config if CLI options not set - if !cidVerSet && importCfg != nil && !importCfg.CidVersion.IsDefault() { - cidVer = int(importCfg.CidVersion.WithDefault(config.DefaultCidVersion)) - cidVerSet = true - } - if !hashFunSet && importCfg != nil && !importCfg.HashFunction.IsDefault() { - hashFunStr = importCfg.HashFunction.WithDefault(config.DefaultHashFunction) - hashFunSet = true - } - - if !cidVerSet && !hashFunSet { - return nil, nil - } - - if hashFunSet && cidVer == 0 { - cidVer = 1 - } - - prefix, err := dag.PrefixForCidVersion(cidVer) - if err != nil { - return nil, err - } - - if hashFunSet { - hashFunCode, ok := mh.Names[strings.ToLower(hashFunStr)] - if !ok { - return nil, fmt.Errorf("unrecognized hash function: %s", strings.ToLower(hashFunStr)) - } - prefix.MhType = hashFunCode - prefix.MhLength = -1 - } - - return &prefix, nil -} - +// getPrefix builds a cid.Builder from CLI flags, falling back to importCfg +// when provided. Returns (nil, nil) when neither CLI nor config set a value. 
func getPrefix(req *cmds.Request, importCfg *config.Import) (cid.Builder, error) { cidVer, cidVerSet := req.Options[filesCidVersionOptionName].(int) hashFunStr, hashFunSet := req.Options[filesHashOptionName].(string) - // Fall back to Import config if CLI options not set - if !cidVerSet && importCfg != nil && !importCfg.CidVersion.IsDefault() { - cidVer = int(importCfg.CidVersion.WithDefault(config.DefaultCidVersion)) - cidVerSet = true - } - if !hashFunSet && importCfg != nil && !importCfg.HashFunction.IsDefault() { - hashFunStr = importCfg.HashFunction.WithDefault(config.DefaultHashFunction) - hashFunSet = true - } - - if !cidVerSet && !hashFunSet { - return nil, nil - } - - if hashFunSet && cidVer == 0 { - cidVer = 1 - } - - prefix, err := dag.PrefixForCidVersion(cidVer) - if err != nil { - return nil, err + if cidVerSet || hashFunSet { + // CLI flags take precedence: build prefix from them directly. + if hashFunSet && cidVer == 0 { + cidVer = 1 + } + prefix, err := dag.PrefixForCidVersion(cidVer) + if err != nil { + return nil, err + } + if hashFunSet { + hashFunCode, ok := mh.Names[strings.ToLower(hashFunStr)] + if !ok { + return nil, fmt.Errorf("unrecognized hash function: %q", hashFunStr) + } + prefix.MhType = hashFunCode + prefix.MhLength = -1 + } + return &prefix, nil } - if hashFunSet { - hashFunCode, ok := mh.Names[strings.ToLower(hashFunStr)] - if !ok { - return nil, fmt.Errorf("unrecognized hash function: %s", strings.ToLower(hashFunStr)) - } - prefix.MhType = hashFunCode - prefix.MhLength = -1 + // No CLI flags: fall back to Import config. 
+ if importCfg != nil { + return importCfg.UnixFSCidBuilder() } - return &prefix, nil + return nil, nil } -func ensureContainingDirectoryExists(r *mfs.Root, path string, builder cid.Builder, maxLinks int, sizeEstimationMode *uio.SizeEstimationMode) error { +func ensureContainingDirectoryExists(r *mfs.Root, path string, opts ...mfs.Option) error { dirtomake := gopath.Dir(path) if dirtomake == "/" { return nil } - return mfs.Mkdir(r, dirtomake, mfs.MkdirOpts{ - Mkparents: true, - CidBuilder: builder, - MaxLinks: maxLinks, - SizeEstimationMode: sizeEstimationMode, - }) + return mfs.Mkdir(r, dirtomake, mfs.MkdirOpts{Mkparents: true}, opts...) } func getFileHandle(r *mfs.Root, path string, create bool, builder cid.Builder) (*mfs.File, error) { @@ -1744,6 +1706,11 @@ Examples: return errors.New("this is a potentially destructive operation; pass --confirm to proceed") } + enc, err := cmdenv.GetCidEncoder(req) + if err != nil { + return err + } + // Determine new root CID var newRootCid cid.Cid if len(req.Arguments) > 0 { @@ -1780,7 +1747,7 @@ Examples: // Special case: empty dir is always available (hardcoded in boxo) emptyDirCid := ft.EmptyDirNode().Cid() if !newRootCid.Equals(emptyDirCid) { - return fmt.Errorf("new root %s does not exist locally; fetch it first with 'ipfs block get'", newRootCid) + return fmt.Errorf("new root %s does not exist locally; fetch it first with 'ipfs block get'", enc.Encode(newRootCid)) } } @@ -1809,7 +1776,7 @@ Examples: if err == nil { oldRootCid, err := cid.Cast(oldRootBytes) if err == nil { - oldRootStr = oldRootCid.String() + oldRootStr = enc.Encode(oldRootCid) } } else if !errors.Is(err, datastore.ErrNotFound) { return fmt.Errorf("reading current MFS root: %w", err) @@ -1822,12 +1789,13 @@ Examples: } // Build output message + newRootStr := enc.Encode(newRootCid) var msg string if oldRootStr != "" { - msg = fmt.Sprintf("MFS root changed from %s to %s\n", oldRootStr, newRootCid) + msg = fmt.Sprintf("MFS root changed from %s to %s\n", 
oldRootStr, newRootStr) msg += fmt.Sprintf("The old root %s will be garbage collected unless pinned.\n", oldRootStr) } else { - msg = fmt.Sprintf("MFS root set to %s\n", newRootCid) + msg = fmt.Sprintf("MFS root set to %s\n", newRootStr) } return cmds.EmitOnce(res, &MessageOutput{Message: msg}) diff --git a/core/commands/keystore.go b/core/commands/keystore.go index afcdb62db5b..d4c9074c682 100644 --- a/core/commands/keystore.go +++ b/core/commands/keystore.go @@ -269,8 +269,8 @@ elsewhere. For example, using openssl to get a PEM with public key: outPath = filepath.Clean(outPath) } - // create file - file, err := os.Create(outPath) + // create file with owner-only permissions to protect private key material + file, err := os.OpenFile(outPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o600) if err != nil { return err } diff --git a/core/commands/mount_nofuse.go b/core/commands/mount_nofuse.go index 2844a4b7183..98330bc3fee 100644 --- a/core/commands/mount_nofuse.go +++ b/core/commands/mount_nofuse.go @@ -1,4 +1,7 @@ -//go:build !windows && nofuse +// Stub for non-FUSE builds: the complement of mount_unix.go's +// (linux || darwin || freebsd) && !nofuse, excluding windows +// which has its own stub in mount_windows.go. +//go:build !windows && (nofuse || !(linux || darwin || freebsd)) package commands diff --git a/core/commands/mount_unix.go b/core/commands/mount_unix.go index 8ca85cdaa2e..1b350f3a1eb 100644 --- a/core/commands/mount_unix.go +++ b/core/commands/mount_unix.go @@ -1,4 +1,5 @@ -//go:build !windows && !nofuse +// Real mount command. go-fuse only builds on linux, darwin, and freebsd. +//go:build (linux || darwin || freebsd) && !nofuse package commands @@ -130,9 +131,11 @@ baz Type: config.Mounts{}, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, mounts *config.Mounts) error { + // Extra space after "MFS" so "mounted at:" lines up with + // IPFS and IPNS in the column above. Matches LongDescription. 
fmt.Fprintf(w, "IPFS mounted at: %s\n", cmdenv.EscNonPrint(mounts.IPFS)) fmt.Fprintf(w, "IPNS mounted at: %s\n", cmdenv.EscNonPrint(mounts.IPNS)) - fmt.Fprintf(w, "MFS mounted at: %s\n", cmdenv.EscNonPrint(mounts.MFS)) + fmt.Fprintf(w, "MFS mounted at: %s\n", cmdenv.EscNonPrint(mounts.MFS)) return nil }), diff --git a/core/commands/object/diff.go b/core/commands/object/diff.go index 275f465d807..a73a6f0a834 100644 --- a/core/commands/object/diff.go +++ b/core/commands/object/diff.go @@ -97,26 +97,30 @@ Example: Type: Changes{}, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *Changes) error { + enc, err := cmdenv.GetCidEncoder(req) + if err != nil { + return err + } verbose, _ := req.Options[verboseOptionName].(bool) for _, change := range out.Changes { if verbose { switch change.Type { case dagutils.Add: - fmt.Fprintf(w, "Added new link %q pointing to %s.\n", change.Path, change.After) + fmt.Fprintf(w, "Added new link %q pointing to %s.\n", change.Path, enc.Encode(change.After)) case dagutils.Mod: - fmt.Fprintf(w, "Changed %q from %s to %s.\n", change.Path, change.Before, change.After) + fmt.Fprintf(w, "Changed %q from %s to %s.\n", change.Path, enc.Encode(change.Before), enc.Encode(change.After)) case dagutils.Remove: - fmt.Fprintf(w, "Removed link %q (was %s).\n", change.Path, change.Before) + fmt.Fprintf(w, "Removed link %q (was %s).\n", change.Path, enc.Encode(change.Before)) } } else { switch change.Type { case dagutils.Add: - fmt.Fprintf(w, "+ %s %q\n", change.After, change.Path) + fmt.Fprintf(w, "+ %s %q\n", enc.Encode(change.After), change.Path) case dagutils.Mod: - fmt.Fprintf(w, "~ %s %s %q\n", change.Before, change.After, change.Path) + fmt.Fprintf(w, "~ %s %s %q\n", enc.Encode(change.Before), enc.Encode(change.After), change.Path) case dagutils.Remove: - fmt.Fprintf(w, "- %s %q\n", change.Before, change.Path) + fmt.Fprintf(w, "- %s %q\n", enc.Encode(change.Before), change.Path) } } } diff --git 
a/core/commands/object/patch.go b/core/commands/object/patch.go index 5a82dfe0b69..c41f69ce967 100644 --- a/core/commands/object/patch.go +++ b/core/commands/object/patch.go @@ -54,26 +54,43 @@ var patchRmLinkCmd = &cmds.Command{ ShortDescription: ` Remove a Merkle-link from the given object and return the hash of the result. -DEPRECATED and provided for legacy reasons. Use 'files rm' instead. +DEPRECATED and provided for legacy reasons. + +This command operates at the dag-pb level and only supports removing links +from small, flat UnixFS directories (not HAMTShard). Removing links from +files or large sharded directories will produce invalid UnixFS structures. + +For working with any UnixFS directories (including large/sharded ones), +use 'ipfs files rm' instead: 'ipfs files --help'. `, }, Arguments: []cmds.Argument{ cmds.StringArg("root", true, false, "The hash of the node to modify."), cmds.StringArg("name", true, false, "Name of the link to remove."), }, + Options: []cmds.Option{ + cmds.BoolOption(allowNonUnixFSOptionName, "", "Skip UnixFS validation, allowing link removal on non-directory nodes."), + }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { api, err := cmdenv.GetApi(env, req) if err != nil { return err } + enc, err := cmdenv.GetCidEncoder(req) + if err != nil { + return err + } + root, err := cmdutils.PathOrCidPath(req.Arguments[0]) if err != nil { return err } name := req.Arguments[1] - p, err := api.Object().RmLink(req.Context, root, name) + allowNonUnixFS, _ := req.Options[allowNonUnixFSOptionName].(bool) + p, err := api.Object().RmLink(req.Context, root, name, + options.Object.RmLinkSkipUnixFSValidation(allowNonUnixFS)) if err != nil { return err } @@ -82,7 +99,7 @@ DEPRECATED and provided for legacy reasons. Use 'files rm' instead. 
return err } - return cmds.EmitOnce(res, &Object{Hash: p.RootCid().String()}) + return cmds.EmitOnce(res, &Object{Hash: enc.Encode(p.RootCid())}) }, Type: Object{}, Encoders: cmds.EncoderMap{ @@ -94,7 +111,8 @@ DEPRECATED and provided for legacy reasons. Use 'files rm' instead. } const ( - createOptionName = "create" + createOptionName = "create" + allowNonUnixFSOptionName = "allow-non-unixfs" ) var patchAddLinkCmd = &cmds.Command{ @@ -106,14 +124,19 @@ Add a Merkle-link to the given object and return the hash of the result. DEPRECATED and provided for legacy reasons. -Use MFS and 'files' commands instead: +This command operates at the dag-pb level and only supports adding links +to small, flat UnixFS directories (not HAMTShard). Adding links to files +or large sharded directories will produce invalid UnixFS structures. + +For working with any UnixFS directories (including large/sharded ones), +use MFS and 'files' commands instead: 'ipfs files --help'. $ ipfs files cp /ipfs/QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn /some-dir $ ipfs files cp /ipfs/Qmayz4F4UzqcAMitTzU4zCSckDofvxstDuj3y7ajsLLEVs /some-dir/added-file.jpg $ ipfs files stat --hash /some-dir The above will add 'added-file.jpg' to the directory placed under /some-dir - and the CID of updated directory is returned by 'files stat' + and the CID of updated directory is returned by 'files stat'. 
'files cp' does not download the data, only the root block, which makes it possible to build arbitrary directory trees without fetching them in full to @@ -127,6 +150,7 @@ Use MFS and 'files' commands instead: }, Options: []cmds.Option{ cmds.BoolOption(createOptionName, "p", "Create intermediary nodes."), + cmds.BoolOption(allowNonUnixFSOptionName, "", "Skip UnixFS validation, allowing links on non-directory nodes."), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { api, err := cmdenv.GetApi(env, req) @@ -134,6 +158,11 @@ Use MFS and 'files' commands instead: return err } + enc, err := cmdenv.GetCidEncoder(req) + if err != nil { + return err + } + root, err := cmdutils.PathOrCidPath(req.Arguments[0]) if err != nil { return err @@ -150,9 +179,11 @@ Use MFS and 'files' commands instead: if err != nil { return err } + allowNonUnixFS, _ := req.Options[allowNonUnixFSOptionName].(bool) p, err := api.Object().AddLink(req.Context, root, name, child, - options.Object.Create(create)) + options.Object.Create(create), + options.Object.SkipUnixFSValidation(allowNonUnixFS)) if err != nil { return err } @@ -161,7 +192,7 @@ Use MFS and 'files' commands instead: return err } - return cmds.EmitOnce(res, &Object{Hash: p.RootCid().String()}) + return cmds.EmitOnce(res, &Object{Hash: enc.Encode(p.RootCid())}) }, Type: Object{}, Encoders: cmds.EncoderMap{ diff --git a/core/commands/pin/pin.go b/core/commands/pin/pin.go index cab868c3093..1810f1b5a7b 100644 --- a/core/commands/pin/pin.go +++ b/core/commands/pin/pin.go @@ -20,6 +20,7 @@ import ( coreiface "github.com/ipfs/kubo/core/coreiface" options "github.com/ipfs/kubo/core/coreiface/options" + config "github.com/ipfs/kubo/config" core "github.com/ipfs/kubo/core" cmdenv "github.com/ipfs/kubo/core/commands/cmdenv" "github.com/ipfs/kubo/core/commands/cmdutils" @@ -52,8 +53,11 @@ type AddPinOutput struct { } const ( - pinRecursiveOptionName = "recursive" - pinProgressOptionName = "progress" + 
pinRecursiveOptionName = "recursive" + pinProgressOptionName = "progress" + fastProvideRootOptionName = "fast-provide-root" + fastProvideDAGOptionName = "fast-provide-dag" + fastProvideWaitOptionName = "fast-provide-wait" ) var addPinCmd = &cmds.Command{ @@ -89,6 +93,9 @@ It may take some time. Pass '--progress' to track the progress. cmds.BoolOption(pinRecursiveOptionName, "r", "Recursively pin the object linked to by the specified object(s).").WithDefault(true), cmds.StringOption(pinNameOptionName, "n", "An optional name for created pin(s)."), cmds.BoolOption(pinProgressOptionName, "Show progress"), + cmds.BoolOption(fastProvideRootOptionName, "Immediately provide root CID to DHT after pinning. Default: Import.FastProvideRoot"), + cmds.BoolOption(fastProvideDAGOptionName, "Walk and provide the full DAG according to Provide.Strategy after pinning. Default: Import.FastProvideDAG"), + cmds.BoolOption(fastProvideWaitOptionName, "Block until the immediate provide completes. Default: Import.FastProvideWait"), }, Type: AddPinOutput{}, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { @@ -116,12 +123,15 @@ It may take some time. Pass '--progress' to track the progress. return err } + nd, fpRoot, fpDAG, fpWait := resolveFastProvideFlags(req, env) + if !showProgress { added, err := pinAddMany(req.Context, api, enc, req.Arguments, recursive, name) if err != nil { return err } + fastProvideAfterPin(req, nd, fpRoot, fpDAG, fpWait, added) return cmds.EmitOnce(res, &AddPinOutput{Pins: added}) } @@ -149,6 +159,8 @@ It may take some time. Pass '--progress' to track the progress. 
return val.err } + fastProvideAfterPin(req, nd, fpRoot, fpDAG, fpWait, val.pins) + if ps := v.ProgressStat(); ps.Nodes != 0 { if err := res.Emit(&AddPinOutput{Progress: ps.Nodes, Bytes: ps.Bytes}); err != nil { return err @@ -234,6 +246,77 @@ func pinAddMany(ctx context.Context, api coreiface.CoreAPI, enc cidenc.Encoder, return added, nil } +// resolveFastProvideFlags resolves --fast-provide-root, --fast-provide-dag, +// and --fast-provide-wait from CLI flags, falling back to config defaults. +// Returns the node for use by fastProvideAfterPin. +func resolveFastProvideFlags(req *cmds.Request, env cmds.Environment) (nd *core.IpfsNode, root, dag, wait bool) { + nd, err := cmdenv.GetNode(env) + if err != nil { + return nil, config.DefaultFastProvideRoot, config.DefaultFastProvideDAG, config.DefaultFastProvideWait + } + cfg, err := nd.Repo.Config() + if err != nil { + return nd, config.DefaultFastProvideRoot, config.DefaultFastProvideDAG, config.DefaultFastProvideWait + } + fpRoot, fpRootSet := req.Options[fastProvideRootOptionName].(bool) + fpDAG, fpDAGSet := req.Options[fastProvideDAGOptionName].(bool) + fpWait, fpWaitSet := req.Options[fastProvideWaitOptionName].(bool) + root = config.ResolveBoolFromConfig(fpRoot, fpRootSet, cfg.Import.FastProvideRoot, config.DefaultFastProvideRoot) + dag = config.ResolveBoolFromConfig(fpDAG, fpDAGSet, cfg.Import.FastProvideDAG, config.DefaultFastProvideDAG) + wait = config.ResolveBoolFromConfig(fpWait, fpWaitSet, cfg.Import.FastProvideWait, config.DefaultFastProvideWait) + return nd, root, dag, wait +} + +// fastProvideAfterPin handles both root and DAG providing after a +// successful pin operation. Best-effort: errors are logged but do not +// fail the pin command. 
+func fastProvideAfterPin(req *cmds.Request, nd *core.IpfsNode, fpRoot, fpDAG, fpWait bool, encodedCIDs []string) { + if !fpRoot && !fpDAG { + return + } + cfg, err := nd.Repo.Config() + if err != nil { + return + } + var cidList []cid.Cid + for _, s := range encodedCIDs { + c, err := cid.Decode(s) + if err != nil { + continue + } + cidList = append(cidList, c) + } + + if fpDAG { + // DAG walk includes the root CID (DFS pre-order emits it + // first), so a separate root provide is not needed. + // Single call with all roots shares one bloom tracker. + cmdenv.ExecuteFastProvideDAG( + req.Context, + nd.Context(), + cidList, + nd.ProvidingStrategy, + nd.Blockstore, + nd.Provider, + fpWait, + uint(cfg.Provide.BloomFPRate.WithDefault(config.DefaultProvideBloomFPRate)), + 0, // block count unknown; bloom chain auto-grows + ) + } else if fpRoot { + for _, c := range cidList { + if err := cmdenv.ExecuteFastProvideRoot( + req.Context, nd, cfg, c, + fpWait, + true, // isPinned + true, // isPinnedRoot + false, // isMFS + ); err != nil { + log.Errorf("fast provide root after pin: %s", err) + } + } + } +} + var rmPinCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Remove object from pin-list.", @@ -570,7 +653,7 @@ func pinLsAll(req *cmds.Request, typeStr string, detailed bool, name string, api opt, err := options.Pin.Ls.Type(typeStr) if err != nil { - panic("unhandled pin type") + return err } pins := make(chan coreiface.Pin) @@ -622,6 +705,9 @@ pin. }, Options: []cmds.Option{ cmds.BoolOption(pinUnpinOptionName, "Remove the old pin.").WithDefault(true), + cmds.BoolOption(fastProvideRootOptionName, "Immediately provide new root CID to DHT after update. Default: Import.FastProvideRoot"), + cmds.BoolOption(fastProvideDAGOptionName, "Walk and provide the full DAG according to Provide.Strategy after update. Default: Import.FastProvideDAG"), + cmds.BoolOption(fastProvideWaitOptionName, "Block until the immediate provide completes. 
Default: Import.FastProvideWait"), }, Type: PinOutput{}, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { @@ -662,6 +748,9 @@ pin. return err } + nd, fpRoot, fpDAG, fpWait := resolveFastProvideFlags(req, env) + fastProvideAfterPin(req, nd, fpRoot, fpDAG, fpWait, []string{enc.Encode(to.RootCid())}) + return cmds.EmitOnce(res, &PinOutput{Pins: []string{enc.Encode(from.RootCid()), enc.Encode(to.RootCid())}}) }, Encoders: cmds.EncoderMap{ diff --git a/core/commands/pin/remotepin.go b/core/commands/pin/remotepin.go index 3936ce635df..a99d7aef9fa 100644 --- a/core/commands/pin/remotepin.go +++ b/core/commands/pin/remotepin.go @@ -17,6 +17,7 @@ import ( pinclient "github.com/ipfs/boxo/pinning/remote/client" cid "github.com/ipfs/go-cid" + cidenc "github.com/ipfs/go-cidutil/cidenc" cmds "github.com/ipfs/go-ipfs-cmds" logging "github.com/ipfs/go-log/v2" config "github.com/ipfs/kubo/config" @@ -73,11 +74,11 @@ type RemotePinOutput struct { Name string } -func toRemotePinOutput(ps pinclient.PinStatusGetter) RemotePinOutput { +func toRemotePinOutput(ps pinclient.PinStatusGetter, enc cidenc.Encoder) RemotePinOutput { return RemotePinOutput{ Name: ps.GetPin().GetName(), Status: ps.GetStatus().String(), - Cid: ps.GetPin().GetCid().String(), + Cid: enc.Encode(ps.GetPin().GetCid()), } } @@ -143,6 +144,11 @@ NOTE: a comma-separated notation is supported in CLI for convenience: ctx, cancel := context.WithCancel(req.Context) defer cancel() + enc, err := cmdenv.GetCidEncoder(req) + if err != nil { + return err + } + // Get remote service c, err := getRemotePinServiceFromRequest(req, env) if err != nil { @@ -257,7 +263,7 @@ NOTE: a comma-separated notation is supported in CLI for convenience: } } - return res.Emit(toRemotePinOutput(ps)) + return res.Emit(toRemotePinOutput(ps, enc)) }, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *RemotePinOutput) error { @@ -294,6 +300,11 @@ Pass 
'--status=queued,pinning,pinned,failed' to list pins in all states. return err } + enc, err := cmdenv.GetCidEncoder(req) + if err != nil { + return err + } + ctx, cancel := context.WithCancel(req.Context) defer cancel() @@ -303,7 +314,7 @@ Pass '--status=queued,pinning,pinned,failed' to list pins in all states. lsErr <- lsRemote(ctx, req, c, psCh) }() for ps := range psCh { - if err := res.Emit(toRemotePinOutput(ps)); err != nil { + if err := res.Emit(toRemotePinOutput(ps, enc)); err != nil { return err } } diff --git a/core/commands/refs.go b/core/commands/refs.go index 53c92c3df04..56b0ca6339f 100644 --- a/core/commands/refs.go +++ b/core/commands/refs.go @@ -149,6 +149,11 @@ Displays the hashes of all local objects. NOTE: This treats all local objects as return err } + enc, err := cmdenv.GetCidEncoder(req) + if err != nil { + return err + } + // todo: make async allKeys, err := n.Blockstore.AllKeysChan(ctx) if err != nil { @@ -156,7 +161,7 @@ Displays the hashes of all local objects. NOTE: This treats all local objects as } for k := range allKeys { - err := res.Emit(&RefWrapper{Ref: k.String()}) + err := res.Emit(&RefWrapper{Ref: enc.Encode(k)}) if err != nil { return err } diff --git a/core/commands/repo_verify_test.go b/core/commands/repo_verify_test.go index 4b6b65a0750..30b3cd2cf90 100644 --- a/core/commands/repo_verify_test.go +++ b/core/commands/repo_verify_test.go @@ -1,3 +1,4 @@ +// Requires Go 1.25+ for testing/synctest. 
//go:build go1.25 package commands diff --git a/core/commands/root.go b/core/commands/root.go index d70a49376b9..57c1a912805 100644 --- a/core/commands/root.go +++ b/core/commands/root.go @@ -81,7 +81,7 @@ TOOL COMMANDS config Manage configuration version Show IPFS version information diag Generate diagnostic reports - update Download and apply go-ipfs updates + update Update Kubo to a different version commands List all available commands log Manage and show logs of running daemon @@ -157,7 +157,7 @@ var rootSubcommands = map[string]*cmds.Command{ "refs": RefsCmd, "resolve": ResolveCmd, "swarm": SwarmCmd, - "update": ExternalBinary("Please see https://github.com/ipfs/ipfs-update/blob/master/README.md#install for installation instructions."), + "update": UpdateCmd, "version": VersionCmd, "shutdown": daemonShutdownCmd, "cid": CidCmd, diff --git a/core/commands/routing.go b/core/commands/routing.go index 629c3051527..280b14174ce 100644 --- a/core/commands/routing.go +++ b/core/commands/routing.go @@ -269,11 +269,16 @@ var provideRefRoutingCmd = &cmds.Command{ } var reprovideRoutingCmd = &cmds.Command{ - Status: cmds.Experimental, + Status: cmds.Deprecated, Helptext: cmds.HelpText{ - Tagline: "Trigger reprovider.", + Tagline: "Trigger reprovider (legacy provider only).", ShortDescription: ` Trigger reprovider to announce our data to network. + +Only available with the legacy provider (Provide.DHT.SweepEnabled=false). +Returns an error when Provide.DHT.SweepEnabled=true (the default). +The sweep provider reprovides automatically on schedule. +Use 'ipfs provide stat -a' to monitor reprovide progress. `, }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { @@ -286,7 +291,6 @@ Trigger reprovider to announce our data to network. return ErrNotOnline } - // respect global config cfg, err := nd.Repo.Config() if err != nil { return err @@ -299,7 +303,9 @@ Trigger reprovider to announce our data to network. 
} provideSys, ok := nd.Provider.(provider.Reprovider) if !ok { - return errors.New("manual reprovide only available with legacy provider (Provide.DHT.SweepEnabled=false)") + err := errors.New("invalid configuration: manual reprovide not available with sweep provider (Provide.DHT.SweepEnabled=true), use 'ipfs provide stat -a' to monitor automatic reprovide progress") + log.Error(err) + return err } err = provideSys.Reprovide(req.Context) diff --git a/core/commands/update.go b/core/commands/update.go new file mode 100644 index 00000000000..6ffc96c383a --- /dev/null +++ b/core/commands/update.go @@ -0,0 +1,848 @@ +package commands + +import ( + "archive/tar" + "archive/zip" + "bytes" + "compress/gzip" + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "slices" + "strings" + "time" + + goversion "github.com/hashicorp/go-version" + cmds "github.com/ipfs/go-ipfs-cmds" + version "github.com/ipfs/kubo" + "github.com/ipfs/kubo/repo/fsrepo" + "github.com/ipfs/kubo/repo/fsrepo/migrations" + "github.com/ipfs/kubo/repo/fsrepo/migrations/atomicfile" +) + +const ( + updatePreOptionName = "pre" + updateCountOptionName = "count" + updateAllowDowngradeOptionName = "allow-downgrade" + + // updateDefaultTimeout is the fallback timeout for update operations + // when the user does not pass --timeout. One hour allows for slow + // connections downloading ~50 MB archives. + updateDefaultTimeout = 1 * time.Hour + + // maxBinarySize caps the decompressed binary size to prevent zip/tar + // bombs. Current kubo binary is ~120 MB uncompressed; 1 GB leaves + // room for growth while catching decompression attacks. + maxBinarySize = 1 << 30 + + // stashDirName is the directory under $IPFS_PATH where backups of + // previously installed Kubo binaries are kept so 'update revert' can + // restore them and 'update clean' can free the space. + stashDirName = "old-bin" +) + +// UpdateCmd is the "ipfs update" command tree. 
+var UpdateCmd = &cmds.Command{ + Status: cmds.Experimental, + Helptext: cmds.HelpText{ + Tagline: "Update Kubo to a different version", + ShortDescription: ` +Downloads pre-built Kubo binaries from GitHub Releases, verifies +checksums, and replaces the running binary in place. The previous +binary is saved so you can revert if needed. + +The daemon must be stopped before installing or reverting. +`, + LongDescription: ` +Downloads pre-built Kubo binaries from GitHub Releases, verifies +checksums, and replaces the running binary in place. The previous +binary is saved so you can revert if needed. + +The daemon must be stopped before installing or reverting. + +ENVIRONMENT VARIABLES + + HTTPS_PROXY + HTTP proxy for reaching GitHub. Set this when GitHub is not + directly reachable from your network. + Example: HTTPS_PROXY=http://proxy:8080 ipfs update install + + GITHUB_TOKEN + GitHub personal access token. Raises the API rate limit from + 60 to 5000 requests per hour. Set this if you hit "rate limit + exceeded" errors. GH_TOKEN is also accepted. + + IPFS_PATH + Determines where binary backups are stored ($IPFS_PATH/old-bin/). + Defaults to ~/.ipfs. +`, + }, + NoRemote: true, + Extra: CreateCmdExtras(SetDoesNotUseRepo(true), SetDoesNotUseConfigAsInput(true)), + Subcommands: map[string]*cmds.Command{ + "check": updateCheckCmd, + "versions": updateVersionsCmd, + "install": updateInstallCmd, + "revert": updateRevertCmd, + "clean": updateCleanCmd, + }, +} + +// -- check -- + +// UpdateCheckOutput is the output of "ipfs update check". +type UpdateCheckOutput struct { + CurrentVersion string + LatestVersion string + UpdateAvailable bool +} + +var updateCheckCmd = &cmds.Command{ + Status: cmds.Experimental, + Helptext: cmds.HelpText{ + Tagline: "Check if a newer Kubo version is available", + ShortDescription: ` +Queries GitHub Releases for the latest Kubo version and compares +it against the currently running binary. 
Only considers releases +with binaries available for your operating system and architecture. + +Works while the daemon is running (read-only, no repo access). + +ENVIRONMENT VARIABLES + + HTTPS_PROXY HTTP proxy for reaching GitHub API. + GITHUB_TOKEN Raises the API rate limit (GH_TOKEN also accepted). +`, + }, + NoRemote: true, + Extra: CreateCmdExtras(SetDoesNotUseRepo(true), SetDoesNotUseConfigAsInput(true)), + Options: []cmds.Option{ + cmds.BoolOption(updatePreOptionName, "Include pre-release versions."), + }, + Type: UpdateCheckOutput{}, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + ctx, cancel := updateContext(req) + defer cancel() + includePre, _ := req.Options[updatePreOptionName].(bool) + + rel, err := githubLatestRelease(ctx, includePre) + if err != nil { + return fmt.Errorf("checking for updates: %w", err) + } + + latest := trimVPrefix(rel.TagName) + current := currentVersion() + + updateAvailable, err := isNewerVersion(current, latest) + if err != nil { + return err + } + + return cmds.EmitOnce(res, &UpdateCheckOutput{ + CurrentVersion: current, + LatestVersion: latest, + UpdateAvailable: updateAvailable, + }) + }, + Encoders: cmds.EncoderMap{ + cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *UpdateCheckOutput) error { + if out.UpdateAvailable { + fmt.Fprintf(w, "Update available: %s -> %s\n", out.CurrentVersion, out.LatestVersion) + fmt.Fprintln(w, "Run 'ipfs update install' to install the latest version.") + } else { + fmt.Fprintf(w, "Already up to date (%s)\n", out.CurrentVersion) + } + return nil + }), + }, +} + +// -- versions -- + +// UpdateVersionsOutput is the output of "ipfs update versions". +type UpdateVersionsOutput struct { + Current string + Versions []string +} + +var updateVersionsCmd = &cmds.Command{ + Status: cmds.Experimental, + Helptext: cmds.HelpText{ + Tagline: "List available Kubo versions", + ShortDescription: ` +Lists Kubo versions published on GitHub Releases. 
The currently +running version is marked with an asterisk (*). +`, + }, + NoRemote: true, + Extra: CreateCmdExtras(SetDoesNotUseRepo(true), SetDoesNotUseConfigAsInput(true)), + Options: []cmds.Option{ + cmds.IntOption(updateCountOptionName, "n", "Number of versions to list.").WithDefault(30), + cmds.BoolOption(updatePreOptionName, "Include pre-release versions."), + }, + Type: UpdateVersionsOutput{}, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + ctx, cancel := updateContext(req) + defer cancel() + count, _ := req.Options[updateCountOptionName].(int) + if count <= 0 { + count = 30 + } + includePre, _ := req.Options[updatePreOptionName].(bool) + + releases, err := githubListReleases(ctx, count, includePre) + if err != nil { + return fmt.Errorf("listing versions: %w", err) + } + + versions := make([]string, 0, len(releases)) + for _, r := range releases { + versions = append(versions, trimVPrefix(r.TagName)) + } + + return cmds.EmitOnce(res, &UpdateVersionsOutput{ + Current: currentVersion(), + Versions: versions, + }) + }, + Encoders: cmds.EncoderMap{ + cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *UpdateVersionsOutput) error { + for _, v := range out.Versions { + marker := " " + if v == out.Current { + marker = "* " + } + fmt.Fprintf(w, "%s%s\n", marker, v) + } + return nil + }), + }, +} + +// -- install -- + +// UpdateInstallOutput is the output of "ipfs update install". +type UpdateInstallOutput struct { + OldVersion string + NewVersion string + BinaryPath string + StashedTo string +} + +var updateInstallCmd = &cmds.Command{ + Status: cmds.Experimental, + Helptext: cmds.HelpText{ + Tagline: "Download and install a Kubo update", + ShortDescription: ` +Downloads the specified version (or latest) from GitHub Releases, +verifies the SHA-512 checksum, saves a backup of the current binary, +and atomically replaces it. 
+ +If replacing the binary fails due to file permissions, the new binary +is saved to a temporary directory and the path is printed so you can +move it manually (e.g. with sudo). + +Previous binaries are kept in $IPFS_PATH/old-bin/ and can be +restored with 'ipfs update revert'. +`, + }, + NoRemote: true, + Extra: CreateCmdExtras(SetDoesNotUseRepo(true), SetDoesNotUseConfigAsInput(true)), + Arguments: []cmds.Argument{ + cmds.StringArg("version", false, false, "Version to install (default: latest)."), + }, + Options: []cmds.Option{ + cmds.BoolOption(updatePreOptionName, "Include pre-release versions when resolving latest."), + cmds.BoolOption(updateAllowDowngradeOptionName, "Allow installing an older version."), + }, + Type: UpdateInstallOutput{}, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + ctx, cancel := updateContext(req) + defer cancel() + + if err := checkDaemonNotRunning(); err != nil { + return err + } + + current := currentVersion() + includePre, _ := req.Options[updatePreOptionName].(bool) + allowDowngrade, _ := req.Options[updateAllowDowngradeOptionName].(bool) + + // Resolve target version. + var tag string + if len(req.Arguments) > 0 && req.Arguments[0] != "" { + tag = normalizeVersion(req.Arguments[0]) + } else { + rel, err := githubLatestRelease(ctx, includePre) + if err != nil { + return fmt.Errorf("finding latest release: %w", err) + } + tag = rel.TagName + } + target := trimVPrefix(tag) + + // Compare versions. + if target == current { + return fmt.Errorf("already running version %s", current) + } + + newer, err := isNewerVersion(current, target) + if err != nil { + return err + } + if !newer && !allowDowngrade { + return fmt.Errorf("version %s is older than current %s (use --allow-downgrade to force)", target, current) + } + + // Download, verify, and extract before touching the current binary. 
+ fmt.Fprintf(os.Stderr, "Downloading Kubo %s...\n", target) + + _, asset, err := findReleaseAsset(ctx, normalizeVersion(target)) + if err != nil { + return err + } + + data, err := downloadAsset(ctx, asset.BrowserDownloadURL) + if err != nil { + return err + } + + if err := downloadAndVerifySHA512(ctx, data, asset.BrowserDownloadURL); err != nil { + return fmt.Errorf("checksum verification failed: %w", err) + } + fmt.Fprintln(os.Stderr, "Checksum verified (SHA-512).") + + binData, err := extractBinaryFromArchive(data) + if err != nil { + return fmt.Errorf("extracting binary: %w", err) + } + + // Resolve current binary path. + binPath, err := os.Executable() + if err != nil { + return fmt.Errorf("finding current binary: %w", err) + } + binPath, err = filepath.EvalSymlinks(binPath) + if err != nil { + return fmt.Errorf("resolving binary path: %w", err) + } + + // Stash current binary, then replace it. + stashedTo, err := stashBinary(binPath, current) + if err != nil { + return fmt.Errorf("backing up current binary: %w", err) + } + fmt.Fprintf(os.Stderr, "Backed up current binary to %s\n", stashedTo) + + if err := replaceBinary(binPath, binData); err != nil { + // Permission error fallback: save to a unique temp file. 
+ if errors.Is(err, os.ErrPermission) { + tmpPath, writeErr := writeBinaryToTempFile(binData, target) + if writeErr != nil { + return fmt.Errorf("cannot write fallback binary: %w (original error: %v)", writeErr, err) + } + fmt.Fprintf(os.Stderr, "Could not replace %s (permission denied).\n", binPath) + fmt.Fprintf(os.Stderr, "New binary saved to: %s\n", tmpPath) + fmt.Fprintf(os.Stderr, "Move it manually, e.g.: sudo mv %s %s\n", tmpPath, binPath) + return cmds.EmitOnce(res, &UpdateInstallOutput{ + OldVersion: current, + NewVersion: target, + BinaryPath: tmpPath, + StashedTo: stashedTo, + }) + } + return fmt.Errorf("replacing binary: %w", err) + } + + fmt.Fprintf(os.Stderr, "Successfully updated Kubo %s -> %s\n", current, target) + + return cmds.EmitOnce(res, &UpdateInstallOutput{ + OldVersion: current, + NewVersion: target, + BinaryPath: binPath, + StashedTo: stashedTo, + }) + }, + Encoders: cmds.EncoderMap{ + cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *UpdateInstallOutput) error { + // All status output goes to stderr in Run; text encoder is a no-op. + return nil + }), + }, +} + +// -- revert -- + +// UpdateRevertOutput is the output of "ipfs update revert". +type UpdateRevertOutput struct { + RestoredVersion string + BinaryPath string +} + +var updateRevertCmd = &cmds.Command{ + Status: cmds.Experimental, + Helptext: cmds.HelpText{ + Tagline: "Revert to a previously installed Kubo version", + ShortDescription: ` +Restores the most recently backed up binary from $IPFS_PATH/old-bin/. +The backup is created automatically by 'ipfs update install'. 
+`, + }, + NoRemote: true, + Extra: CreateCmdExtras(SetDoesNotUseRepo(true), SetDoesNotUseConfigAsInput(true)), + Type: UpdateRevertOutput{}, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + if err := checkDaemonNotRunning(); err != nil { + return err + } + + stashDir, err := getStashDir() + if err != nil { + return err + } + + stashPath, stashVer, err := findLatestStash(stashDir) + if err != nil { + return err + } + + stashData, err := os.ReadFile(stashPath) + if err != nil { + return fmt.Errorf("reading stashed binary: %w", err) + } + + binPath, err := os.Executable() + if err != nil { + return fmt.Errorf("finding current binary: %w", err) + } + binPath, err = filepath.EvalSymlinks(binPath) + if err != nil { + return fmt.Errorf("resolving binary path: %w", err) + } + + if err := replaceBinary(binPath, stashData); err != nil { + if errors.Is(err, os.ErrPermission) { + tmpPath, writeErr := writeBinaryToTempFile(stashData, stashVer) + if writeErr != nil { + return fmt.Errorf("cannot write fallback binary: %w (original error: %v)", writeErr, err) + } + fmt.Fprintf(os.Stderr, "Could not replace %s (permission denied).\n", binPath) + fmt.Fprintf(os.Stderr, "Reverted binary saved to: %s\n", tmpPath) + fmt.Fprintf(os.Stderr, "Move it manually, e.g.: sudo mv %s %s\n", tmpPath, binPath) + return cmds.EmitOnce(res, &UpdateRevertOutput{ + RestoredVersion: stashVer, + BinaryPath: tmpPath, + }) + } + return fmt.Errorf("replacing binary: %w", err) + } + + // Remove the stash file that was restored. + os.Remove(stashPath) + + fmt.Fprintf(os.Stderr, "Reverted to Kubo %s\n", stashVer) + + return cmds.EmitOnce(res, &UpdateRevertOutput{ + RestoredVersion: stashVer, + BinaryPath: binPath, + }) + }, + Encoders: cmds.EncoderMap{ + cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *UpdateRevertOutput) error { + return nil + }), + }, +} + +// -- clean -- + +// UpdateCleanOutput is the output of "ipfs update clean". 
+type UpdateCleanOutput struct { + Removed []string + BytesFreed int64 +} + +var updateCleanCmd = &cmds.Command{ + Status: cmds.Experimental, + Helptext: cmds.HelpText{ + Tagline: "Remove backups of previous Kubo versions", + ShortDescription: ` +Deletes every backed-up Kubo binary from $IPFS_PATH/old-bin/ to free +disk space. After running this, 'ipfs update revert' will have nothing +to roll back to. + +Files in $IPFS_PATH/old-bin/ that do not match the 'ipfs-' +naming convention are left untouched. + +Safe to run while the daemon is up: only the backup directory is +touched, never the running binary. +`, + }, + NoRemote: true, + Extra: CreateCmdExtras(SetDoesNotUseRepo(true), SetDoesNotUseConfigAsInput(true)), + Type: UpdateCleanOutput{}, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + repoPath, err := fsrepo.BestKnownPath() + if err != nil { + return fmt.Errorf("determining IPFS path: %w", err) + } + dir := filepath.Join(repoPath, stashDirName) + + stashes, err := listStashes(dir) + if err != nil { + // A missing stash directory just means there is nothing to clean. 
+ if errors.Is(err, os.ErrNotExist) { + return cmds.EmitOnce(res, &UpdateCleanOutput{}) + } + return fmt.Errorf("reading stash directory: %w", err) + } + + out := &UpdateCleanOutput{ + Removed: make([]string, 0, len(stashes)), + } + for _, s := range stashes { + if err := os.Remove(s.path); err != nil { + return fmt.Errorf("removing %s: %w", s.path, err) + } + out.Removed = append(out.Removed, s.name) + out.BytesFreed += s.size + } + return cmds.EmitOnce(res, out) + }, + Encoders: cmds.EncoderMap{ + cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *UpdateCleanOutput) error { + if len(out.Removed) == 0 { + fmt.Fprintln(w, "No stashed binaries to remove.") + return nil + } + for _, name := range out.Removed { + fmt.Fprintf(w, "Removed %s\n", name) + } + fmt.Fprintf(w, "Freed %.1f MiB across %d files.\n", + float64(out.BytesFreed)/(1<<20), len(out.Removed)) + return nil + }), + }, +} + +// -- helpers -- + +// updateContext returns a context for update operations. If the user +// passed --timeout, req.Context already carries that deadline and is +// returned as-is. Otherwise a fallback of updateDefaultTimeout is applied +// so HTTP calls cannot hang indefinitely. +func updateContext(req *cmds.Request) (context.Context, context.CancelFunc) { + ctx := req.Context + if _, ok := ctx.Deadline(); ok { + return ctx, func() {} + } + return context.WithTimeout(ctx, updateDefaultTimeout) +} + +// currentVersion returns the version string used by update commands. +// TEST_KUBO_VERSION overrides the reported version; the TEST_ prefix +// signals it is a test-only escape hatch used by integration tests in +// test/cli/update_test.go and should never be set in production. +func currentVersion() string { + if v := os.Getenv("TEST_KUBO_VERSION"); v != "" { + return v + } + return version.CurrentVersionNumber +} + +// checkDaemonNotRunning returns an error if the IPFS daemon is running. 
+func checkDaemonNotRunning() error { + repoPath, err := fsrepo.BestKnownPath() + if err != nil { + // Without a repo path we can't check the lock, but we shouldn't + // silently proceed either. Warn so the user notices a misconfigured + // IPFS_PATH instead of getting an unexplained install. + fmt.Fprintf(os.Stderr, "Warning: could not determine IPFS path, skipping daemon check: %v\n", err) + return nil + } + locked, err := fsrepo.LockedByOtherProcess(repoPath) + if err != nil { + // Lock check failed (e.g. repo doesn't exist yet), not an error. + fmt.Fprintf(os.Stderr, "Warning: could not check daemon lock at %s: %v\n", repoPath, err) + return nil + } + if locked { + return fmt.Errorf("IPFS daemon is running (repo locked at %s). Stop it first with 'ipfs shutdown'", repoPath) + } + return nil +} + +// getStashDir returns the path to the stash directory, creating it if needed. +func getStashDir() (string, error) { + repoPath, err := fsrepo.BestKnownPath() + if err != nil { + return "", fmt.Errorf("determining IPFS path: %w", err) + } + dir := filepath.Join(repoPath, stashDirName) + if err := os.MkdirAll(dir, 0o755); err != nil { + return "", fmt.Errorf("creating stash directory: %w", err) + } + return dir, nil +} + +// stashBinary copies the current binary to the stash directory. +// Uses named returns so the deferred dst.Close() error is not silently +// discarded -- a failed close means the backup may be incomplete. 
+func stashBinary(binPath, ver string) (stashPath string, err error) { + dir, err := getStashDir() + if err != nil { + return "", err + } + + stashName := migrations.ExeName(fmt.Sprintf("ipfs-%s", ver)) + stashPath = filepath.Join(dir, stashName) + + src, err := os.Open(binPath) + if err != nil { + return "", fmt.Errorf("opening current binary: %w", err) + } + defer src.Close() + + dst, err := os.OpenFile(stashPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o755) + if err != nil { + return "", fmt.Errorf("creating stash file: %w", err) + } + defer func() { + if cerr := dst.Close(); cerr != nil && err == nil { + err = fmt.Errorf("writing stash file: %w", cerr) + } + }() + + if _, err = io.Copy(dst, src); err != nil { + return "", fmt.Errorf("copying binary to stash: %w", err) + } + if err = dst.Sync(); err != nil { + return "", fmt.Errorf("syncing stash file: %w", err) + } + + return stashPath, nil +} + +// stashEntry describes a single backed-up Kubo binary in the stash directory. +type stashEntry struct { + path string + name string + ver string + parsed *goversion.Version + size int64 +} + +// listStashes returns every stashed binary in dir, newest first. Files that +// do not match the "ipfs-" naming convention are skipped so the +// directory can hold unrelated user files without breaking revert/clean. 
+func listStashes(dir string) ([]stashEntry, error) { + entries, err := os.ReadDir(dir) + if err != nil { + return nil, err + } + + var stashes []stashEntry + for _, e := range entries { + if e.IsDir() { + continue + } + name := e.Name() + // Expected format: ipfs- or ipfs-.exe + trimmed := strings.TrimPrefix(name, "ipfs-") + if trimmed == name { + continue // doesn't match pattern + } + trimmed = strings.TrimSuffix(trimmed, ".exe") + parsed, parseErr := goversion.NewVersion(trimmed) + if parseErr != nil { + continue + } + var size int64 + if info, err := e.Info(); err == nil { + size = info.Size() + } + stashes = append(stashes, stashEntry{ + path: filepath.Join(dir, name), + name: name, + ver: trimmed, + parsed: parsed, + size: size, + }) + } + + slices.SortFunc(stashes, func(a, b stashEntry) int { + // Sort newest first: if a > b return -1. + if a.parsed.GreaterThan(b.parsed) { + return -1 + } + if b.parsed.GreaterThan(a.parsed) { + return 1 + } + return 0 + }) + + return stashes, nil +} + +// findLatestStash finds the most recently versioned stash file. +func findLatestStash(dir string) (path, ver string, err error) { + stashes, err := listStashes(dir) + if err != nil { + return "", "", fmt.Errorf("reading stash directory: %w", err) + } + if len(stashes) == 0 { + return "", "", fmt.Errorf("no stashed binaries found in %s", dir) + } + return stashes[0].path, stashes[0].ver, nil +} + +// replaceBinary atomically replaces the binary at targetPath with data. +func replaceBinary(targetPath string, data []byte) error { + af, err := atomicfile.New(targetPath, 0o755) + if err != nil { + return err + } + + if _, err := af.Write(data); err != nil { + _ = af.Abort() + return err + } + + return af.Close() +} + +// writeBinaryToTempFile writes data to a uniquely named executable file +// in the system temp directory and returns its path. 
+func writeBinaryToTempFile(data []byte, ver string) (path string, err error) { + pattern := migrations.ExeName(fmt.Sprintf("ipfs-%s-*", ver)) + f, err := os.CreateTemp("", pattern) + if err != nil { + return "", fmt.Errorf("creating temp file: %w", err) + } + defer func() { + if cerr := f.Close(); cerr != nil && err == nil { + err = fmt.Errorf("closing temp file: %w", cerr) + } + if err != nil { + os.Remove(f.Name()) + } + }() + + if _, err = f.Write(data); err != nil { + return "", fmt.Errorf("writing temp file: %w", err) + } + if err = f.Sync(); err != nil { + return "", fmt.Errorf("syncing temp file: %w", err) + } + if err = f.Chmod(0o755); err != nil { + return "", fmt.Errorf("chmod temp file: %w", err) + } + return f.Name(), nil +} + +// extractBinaryFromArchive extracts the kubo/ipfs binary from a tar.gz or zip archive. +func extractBinaryFromArchive(data []byte) ([]byte, error) { + binName := migrations.ExeName("ipfs") + + // Try tar.gz first (Unix releases), then zip (Windows releases). 
+ result, tarErr := extractFromTarGz(data, binName) + if tarErr == nil { + return result, nil + } + + result, zipErr := extractFromZip(data, binName) + if zipErr == nil { + return result, nil + } + + return nil, fmt.Errorf("could not find ipfs binary in archive (expected kubo/%s): tar.gz: %v, zip: %v", binName, tarErr, zipErr) +} + +func extractFromTarGz(data []byte, binName string) ([]byte, error) { + gzr, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + return nil, err + } + defer gzr.Close() + + tr := tar.NewReader(gzr) + lookFor := "kubo/" + binName + for { + hdr, err := tr.Next() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return nil, err + } + if hdr.Name == lookFor { + result, readErr := io.ReadAll(io.LimitReader(tr, maxBinarySize+1)) + if readErr != nil { + return nil, readErr + } + if int64(len(result)) > maxBinarySize { + return nil, fmt.Errorf("extracted binary exceeds maximum size of %d bytes", maxBinarySize) + } + return result, nil + } + } + return nil, fmt.Errorf("%s not found in tar.gz", lookFor) +} + +func extractFromZip(data []byte, binName string) ([]byte, error) { + zr, err := zip.NewReader(bytes.NewReader(data), int64(len(data))) + if err != nil { + return nil, err + } + + lookFor := "kubo/" + binName + for _, f := range zr.File { + if f.Name != lookFor { + continue + } + rc, err := f.Open() + if err != nil { + return nil, err + } + result, err := io.ReadAll(io.LimitReader(rc, maxBinarySize+1)) + rc.Close() + if err != nil { + return nil, err + } + if int64(len(result)) > maxBinarySize { + return nil, fmt.Errorf("extracted binary exceeds maximum size of %d bytes", maxBinarySize) + } + return result, nil + } + return nil, fmt.Errorf("%s not found in zip", lookFor) +} + +// trimVPrefix removes a leading "v" from a version string. +func trimVPrefix(s string) string { + return strings.TrimPrefix(s, "v") +} + +// normalizeVersion ensures a version string has a "v" prefix (for GitHub tags). 
+func normalizeVersion(s string) string { + s = strings.TrimSpace(s) + if !strings.HasPrefix(s, "v") { + return "v" + s + } + return s +} + +// isNewerVersion returns true if target is newer than current. +func isNewerVersion(current, target string) (bool, error) { + cv, err := goversion.NewVersion(current) + if err != nil { + return false, fmt.Errorf("parsing current version %q: %w", current, err) + } + tv, err := goversion.NewVersion(target) + if err != nil { + return false, fmt.Errorf("parsing target version %q: %w", target, err) + } + return tv.GreaterThan(cv), nil +} diff --git a/core/commands/update_github.go b/core/commands/update_github.go new file mode 100644 index 00000000000..64eb532cf25 --- /dev/null +++ b/core/commands/update_github.go @@ -0,0 +1,278 @@ +package commands + +// This file implements fetching Kubo release binaries from GitHub Releases. +// +// We use GitHub Releases instead of dist.ipfs.tech because GitHub is harder +// to censor. Many networks and regions block or interfere with IPFS-specific +// infrastructure, but GitHub is widely accessible and its TLS-protected API +// is difficult to selectively block without breaking many other services. + +import ( + "bytes" + "context" + "crypto/sha512" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "runtime" + "strings" + + version "github.com/ipfs/kubo" +) + +const ( + githubOwner = "ipfs" + githubRepo = "kubo" + + githubAPIBase = "https://api.github.com" + + // maxDownloadSize is the maximum allowed binary archive size (200 MB). + maxDownloadSize = 200 << 20 +) + +// githubReleaseFmt is the default GitHub Releases API URL prefix. +// It is a var (not const) so unit tests can point API calls at a mock server. +var githubReleaseFmt = githubAPIBase + "/repos/" + githubOwner + "/" + githubRepo + "/releases" + +// githubReleaseBaseURL returns the Releases API base URL. It normally +// returns githubReleaseFmt. 
+// +// If TEST_KUBO_UPDATE_GITHUB_URL is set, that value is used instead. +// This is a test-only escape hatch -- the TEST_ prefix is the gate, +// signaling that production users should never set it. The integration +// tests in test/cli/update_test.go use it to redirect API calls to a +// local httptest mock server so the install pipeline can be exercised +// without hitting real GitHub. +func githubReleaseBaseURL() string { + if u := os.Getenv("TEST_KUBO_UPDATE_GITHUB_URL"); u != "" { + return u + } + return githubReleaseFmt +} + +// ghRelease represents a GitHub release. +type ghRelease struct { + TagName string `json:"tag_name"` + Prerelease bool `json:"prerelease"` + Assets []ghAsset `json:"assets"` +} + +// ghAsset represents a release asset on GitHub. +type ghAsset struct { + Name string `json:"name"` + Size int64 `json:"size"` + BrowserDownloadURL string `json:"browser_download_url"` +} + +// githubGet performs an authenticated GET request to the GitHub API. +// It honors GITHUB_TOKEN or GH_TOKEN env vars to avoid the 60 req/hr +// unauthenticated rate limit. 
+func githubGet(ctx context.Context, url string) (*http.Response, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, err + } + + req.Header.Set("Accept", "application/vnd.github+json") + req.Header.Set("User-Agent", "kubo/"+version.CurrentVersionNumber) + + if token := githubToken(); token != "" { + req.Header.Set("Authorization", "Bearer "+token) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode == http.StatusForbidden || resp.StatusCode == http.StatusTooManyRequests { + resp.Body.Close() + hint := "" + if githubToken() == "" { + hint = " (hint: set GITHUB_TOKEN or GH_TOKEN to avoid rate limits)" + } + return nil, fmt.Errorf("GitHub API rate limit exceeded%s", hint) + } + + if resp.StatusCode != http.StatusOK { + resp.Body.Close() + return nil, fmt.Errorf("GitHub API returned HTTP %d for %s", resp.StatusCode, url) + } + + return resp, nil +} + +func githubToken() string { + if t := os.Getenv("GITHUB_TOKEN"); t != "" { + return t + } + return os.Getenv("GH_TOKEN") +} + +// githubLatestRelease returns the newest release that has a platform asset +// for the current GOOS/GOARCH. This avoids false positives when a release +// tag exists but artifacts haven't been uploaded yet. +func githubLatestRelease(ctx context.Context, includePre bool) (*ghRelease, error) { + releases, err := githubListReleases(ctx, 10, includePre) + if err != nil { + return nil, err + } + + for i := range releases { + want := assetNameForPlatformTag(releases[i].TagName) + for _, a := range releases[i].Assets { + if a.Name == want { + return &releases[i], nil + } + } + } + return nil, fmt.Errorf("no release found with a binary for %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// githubListReleases fetches up to count releases, optionally including prereleases. 
+func githubListReleases(ctx context.Context, count int, includePre bool) ([]ghRelease, error) { + // Fetch more than needed so we can filter prereleases and still return count results. + perPage := count + if !includePre { + perPage = count * 3 + } + if perPage > 100 { + perPage = 100 + } + + url := fmt.Sprintf("%s?per_page=%d", githubReleaseBaseURL(), perPage) + resp, err := githubGet(ctx, url) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var all []ghRelease + if err := json.NewDecoder(resp.Body).Decode(&all); err != nil { + return nil, fmt.Errorf("decoding GitHub releases: %w", err) + } + + var filtered []ghRelease + for _, r := range all { + if !includePre && r.Prerelease { + continue + } + filtered = append(filtered, r) + if len(filtered) >= count { + break + } + } + return filtered, nil +} + +// githubReleaseByTag fetches a single release by its git tag. +func githubReleaseByTag(ctx context.Context, tag string) (*ghRelease, error) { + url := fmt.Sprintf("%s/tags/%s", githubReleaseBaseURL(), tag) + resp, err := githubGet(ctx, url) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var rel ghRelease + if err := json.NewDecoder(resp.Body).Decode(&rel); err != nil { + return nil, fmt.Errorf("decoding GitHub release: %w", err) + } + return &rel, nil +} + +// findReleaseAsset locates the platform-appropriate asset in a release. 
+// It fails immediately with a clear message if: +// - the release tag does not exist on GitHub (typo, unreleased version) +// - the release exists but has no binary for this OS/arch (CI still building) +func findReleaseAsset(ctx context.Context, tag string) (*ghRelease, *ghAsset, error) { + rel, err := githubReleaseByTag(ctx, tag) + if err != nil { + return nil, nil, fmt.Errorf("release %s not found on GitHub: %w", tag, err) + } + + want := assetNameForPlatformTag(tag) + for i := range rel.Assets { + if rel.Assets[i].Name == want { + return rel, &rel.Assets[i], nil + } + } + + return nil, nil, fmt.Errorf( + "release %s exists but has no binary for %s/%s yet; build artifacts may still be uploading, try again in a few hours", + tag, runtime.GOOS, runtime.GOARCH) +} + +// downloadAsset downloads a release asset by its browser_download_url. +// This hits GitHub's CDN directly, not the API, so no auth headers are needed. +func downloadAsset(ctx context.Context, url string) ([]byte, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, err + } + req.Header.Set("User-Agent", "kubo/"+version.CurrentVersionNumber) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("downloading asset: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("download returned HTTP %d", resp.StatusCode) + } + + data, err := io.ReadAll(io.LimitReader(resp.Body, maxDownloadSize+1)) + if err != nil { + return nil, fmt.Errorf("reading download: %w", err) + } + if int64(len(data)) > maxDownloadSize { + return nil, fmt.Errorf("download exceeds maximum size of %d bytes", maxDownloadSize) + } + return data, nil +} + +// downloadAndVerifySHA512 downloads the .sha512 sidecar file for the given +// archive URL and verifies the archive data against it. 
+func downloadAndVerifySHA512(ctx context.Context, data []byte, archiveURL string) error {
+	sha512URL := archiveURL + ".sha512"
+	checksumData, err := downloadAsset(ctx, sha512URL)
+	if err != nil {
+		return fmt.Errorf("downloading checksum file: %w", err)
+	}
+
+	// Parse "<hex digest>  <filename>\n" format (standard sha512sum output).
+	fields := strings.Fields(string(checksumData))
+	if len(fields) < 1 {
+		return fmt.Errorf("empty or malformed .sha512 file")
+	}
+	wantHex := fields[0]
+
+	return verifySHA512(data, wantHex)
+}
+
+// verifySHA512 checks that data matches the given hex-encoded SHA-512 hash.
+func verifySHA512(data []byte, wantHex string) error {
+	want, err := hex.DecodeString(wantHex)
+	if err != nil {
+		return fmt.Errorf("invalid hex in SHA-512 checksum: %w", err)
+	}
+	got := sha512.Sum512(data)
+	if !bytes.Equal(got[:], want) {
+		return fmt.Errorf("SHA-512 mismatch: expected %s, got %x", wantHex, got[:])
+	}
+	return nil
+}
+
+// assetNameForPlatformTag returns the expected archive filename for a given
+// release tag and the current GOOS/GOARCH.
+func assetNameForPlatformTag(tag string) string {
+	ext := "tar.gz"
+	if runtime.GOOS == "windows" {
+		ext = "zip"
+	}
+	return fmt.Sprintf("kubo_%s_%s-%s.%s", tag, runtime.GOOS, runtime.GOARCH, ext)
+}
diff --git a/core/commands/update_github_test.go b/core/commands/update_github_test.go
new file mode 100644
index 00000000000..a72129b682c
--- /dev/null
+++ b/core/commands/update_github_test.go
@@ -0,0 +1,428 @@
+package commands
+
+import (
+	"archive/tar"
+	"bytes"
+	"compress/gzip"
+	"crypto/sha512"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"runtime"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// --- SHA-512 verification ---
+//
+// These tests verify the integrity-checking code that protects users from
+// tampered or corrupted downloads. A broken hash check could allow
+// installing a malicious binary, so each failure mode must be covered.
+
+// TestVerifySHA512 exercises the low-level hash comparison function.
+func TestVerifySHA512(t *testing.T) {
+	t.Parallel()
+	data := []byte("hello world")
+	sum := sha512.Sum512(data)
+	validHex := fmt.Sprintf("%x", sum[:])
+
+	t.Run("accepts matching hash", func(t *testing.T) {
+		t.Parallel()
+		err := verifySHA512(data, validHex)
+		assert.NoError(t, err)
+	})
+
+	t.Run("rejects data that does not match hash", func(t *testing.T) {
+		t.Parallel()
+		err := verifySHA512([]byte("tampered"), validHex)
+		assert.ErrorContains(t, err, "SHA-512 mismatch",
+			"must reject data whose hash differs from the expected value")
+	})
+
+	t.Run("rejects malformed hex string", func(t *testing.T) {
+		t.Parallel()
+		err := verifySHA512(data, "not-valid-hex")
+		assert.ErrorContains(t, err, "invalid hex in SHA-512 checksum")
+	})
+}
+
+// TestDownloadAndVerifySHA512 tests the complete download-and-verify flow:
+// fetching a .sha512 sidecar file from alongside the archive URL, parsing
+// the standard sha512sum format ("<hex digest>  <filename>\n"), and comparing
+// against the archive data. This is the function called by "ipfs update install".
+func TestDownloadAndVerifySHA512(t *testing.T) { + t.Parallel() + archiveData := []byte("fake-archive-content") + sum := sha512.Sum512(archiveData) + checksumBody := fmt.Sprintf("%x kubo_v0.41.0_linux-amd64.tar.gz\n", sum[:]) + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/archive.tar.gz.sha512": + _, _ = w.Write([]byte(checksumBody)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + t.Cleanup(srv.Close) + + t.Run("accepts archive matching sidecar hash", func(t *testing.T) { + t.Parallel() + err := downloadAndVerifySHA512(t.Context(), archiveData, srv.URL+"/archive.tar.gz") + assert.NoError(t, err) + }) + + t.Run("rejects archive with wrong content", func(t *testing.T) { + t.Parallel() + err := downloadAndVerifySHA512(t.Context(), []byte("tampered"), srv.URL+"/archive.tar.gz") + assert.ErrorContains(t, err, "SHA-512 mismatch", + "must hard-fail when downloaded archive doesn't match the published checksum") + }) + + t.Run("fails when sidecar file is missing", func(t *testing.T) { + t.Parallel() + err := downloadAndVerifySHA512(t.Context(), archiveData, srv.URL+"/no-such-file.tar.gz") + assert.ErrorContains(t, err, "downloading checksum file", + "must fail if the .sha512 sidecar can't be fetched") + }) +} + +// --- GitHub API layer --- + +// TestGitHubGet verifies the low-level GitHub API helper that adds +// authentication headers and translates HTTP errors into actionable +// messages (especially rate-limit hints for unauthenticated users). 
+func TestGitHubGet(t *testing.T) { + t.Parallel() + + t.Run("sets Accept and User-Agent headers", func(t *testing.T) { + t.Parallel() + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "application/vnd.github+json", r.Header.Get("Accept"), + "must request GitHub's v3 JSON format") + assert.Contains(t, r.Header.Get("User-Agent"), "kubo/", + "User-Agent must identify the kubo version for debugging") + _, _ = w.Write([]byte("{}")) + })) + t.Cleanup(srv.Close) + + resp, err := githubGet(t.Context(), srv.URL) + require.NoError(t, err) + resp.Body.Close() + }) + + t.Run("returns rate-limit error on HTTP 403", func(t *testing.T) { + t.Parallel() + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusForbidden) + })) + t.Cleanup(srv.Close) + + _, err := githubGet(t.Context(), srv.URL) + assert.ErrorContains(t, err, "rate limit exceeded") + }) + + t.Run("returns rate-limit error on HTTP 429", func(t *testing.T) { + t.Parallel() + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusTooManyRequests) + })) + t.Cleanup(srv.Close) + + _, err := githubGet(t.Context(), srv.URL) + assert.ErrorContains(t, err, "rate limit exceeded") + }) + + t.Run("returns HTTP status on server error", func(t *testing.T) { + t.Parallel() + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + })) + t.Cleanup(srv.Close) + + _, err := githubGet(t.Context(), srv.URL) + assert.ErrorContains(t, err, "HTTP 500") + }) +} + +// TestGitHubListReleases verifies that release listing correctly filters +// prereleases and respects the count limit. Uses a mock GitHub API server +// to avoid network dependencies and rate limits in CI. +// +// Not parallel: temporarily overrides the package-level githubReleaseFmt var. 
+func TestGitHubListReleases(t *testing.T) { + allReleases := []ghRelease{ + {TagName: "v0.42.0-rc1", Prerelease: true}, + {TagName: "v0.41.0"}, + {TagName: "v0.40.0"}, + } + body, err := json.Marshal(allReleases) + require.NoError(t, err) + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + _, _ = w.Write(body) + })) + t.Cleanup(srv.Close) + + saved := githubReleaseFmt + githubReleaseFmt = srv.URL + t.Cleanup(func() { githubReleaseFmt = saved }) + + t.Run("excludes prereleases by default", func(t *testing.T) { + got, err := githubListReleases(t.Context(), 10, false) + require.NoError(t, err) + assert.Len(t, got, 2, "the rc1 prerelease should be filtered out") + assert.Equal(t, "v0.41.0", got[0].TagName) + assert.Equal(t, "v0.40.0", got[1].TagName) + }) + + t.Run("includes prereleases when requested", func(t *testing.T) { + got, err := githubListReleases(t.Context(), 10, true) + require.NoError(t, err) + assert.Len(t, got, 3) + assert.Equal(t, "v0.42.0-rc1", got[0].TagName) + }) + + t.Run("respects count limit", func(t *testing.T) { + got, err := githubListReleases(t.Context(), 1, false) + require.NoError(t, err) + assert.Len(t, got, 1, "should return at most 1 release") + }) +} + +// TestGitHubLatestRelease verifies that the "find latest release" logic +// skips releases that don't have a binary for the current OS/arch. +// This handles the real-world case where a release tag is created but +// CI hasn't finished uploading build artifacts yet. +// +// Not parallel: temporarily overrides the package-level githubReleaseFmt var. 
+func TestGitHubLatestRelease(t *testing.T) { + releases := []ghRelease{ + { + TagName: "v0.42.0", + Assets: []ghAsset{{Name: "kubo_v0.42.0_some-other-arch.tar.gz"}}, + }, + { + TagName: "v0.41.0", + Assets: []ghAsset{{Name: assetNameForPlatformTag("v0.41.0")}}, + }, + } + body, err := json.Marshal(releases) + require.NoError(t, err) + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + _, _ = w.Write(body) + })) + t.Cleanup(srv.Close) + + saved := githubReleaseFmt + githubReleaseFmt = srv.URL + t.Cleanup(func() { githubReleaseFmt = saved }) + + rel, err := githubLatestRelease(t.Context(), false) + require.NoError(t, err) + assert.Equal(t, "v0.41.0", rel.TagName, + "should skip v0.42.0 (no binary for %s/%s) and return v0.41.0", + runtime.GOOS, runtime.GOARCH) +} + +// TestFindReleaseAsset verifies that findReleaseAsset locates the correct +// platform-specific asset in a release, and returns a clear error when the +// release exists but has no binary for the current OS/arch. +// +// Not parallel: temporarily overrides the package-level githubReleaseFmt var. 
+func TestFindReleaseAsset(t *testing.T) { + wantAsset := assetNameForPlatformTag("v0.50.0") + + release := ghRelease{ + TagName: "v0.50.0", + Assets: []ghAsset{ + {Name: "kubo_v0.50.0_some-other-arch.tar.gz", BrowserDownloadURL: "https://example.com/other"}, + {Name: wantAsset, BrowserDownloadURL: "https://example.com/correct"}, + }, + } + body, err := json.Marshal(release) + require.NoError(t, err) + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + _, _ = w.Write(body) + })) + t.Cleanup(srv.Close) + + saved := githubReleaseFmt + githubReleaseFmt = srv.URL + t.Cleanup(func() { githubReleaseFmt = saved }) + + t.Run("returns matching asset for current platform", func(t *testing.T) { + rel, asset, err := findReleaseAsset(t.Context(), "v0.50.0") + require.NoError(t, err) + assert.Equal(t, "v0.50.0", rel.TagName) + assert.Equal(t, wantAsset, asset.Name) + assert.Equal(t, "https://example.com/correct", asset.BrowserDownloadURL) + }) + + t.Run("returns error when no asset matches current platform", func(t *testing.T) { + // Serve a release that only has an asset for a different arch. + noMatch := ghRelease{ + TagName: "v0.51.0", + Assets: []ghAsset{{Name: "kubo_v0.51.0_plan9-mips.tar.gz"}}, + } + noMatchBody, err := json.Marshal(noMatch) + require.NoError(t, err) + + noMatchSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + _, _ = w.Write(noMatchBody) + })) + t.Cleanup(noMatchSrv.Close) + + githubReleaseFmt = noMatchSrv.URL + + _, _, err = findReleaseAsset(t.Context(), "v0.51.0") + assert.ErrorContains(t, err, "has no binary for", + "should explain that the release exists but lacks a matching asset") + }) +} + +// --- Asset download --- + +// TestDownloadAsset verifies the HTTP download helper that fetches release +// archives from GitHub's CDN. Tests both the happy path and HTTP error +// reporting. 
+func TestDownloadAsset(t *testing.T) { + t.Parallel() + + t.Run("downloads content successfully", func(t *testing.T) { + t.Parallel() + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + _, _ = w.Write([]byte("binary-content")) + })) + t.Cleanup(srv.Close) + + data, err := downloadAsset(t.Context(), srv.URL) + require.NoError(t, err) + assert.Equal(t, []byte("binary-content"), data) + }) + + t.Run("returns clear error on HTTP failure", func(t *testing.T) { + t.Parallel() + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + t.Cleanup(srv.Close) + + _, err := downloadAsset(t.Context(), srv.URL) + assert.ErrorContains(t, err, "HTTP 404") + }) +} + +// --- Archive extraction --- + +// TestExtractBinaryFromArchive verifies that the ipfs binary can be +// extracted from release archives. Kubo releases use tar.gz on Unix +// and zip on Windows, with the binary at "kubo/ipfs" inside the archive. +func TestExtractBinaryFromArchive(t *testing.T) { + t.Parallel() + + t.Run("extracts binary from valid tar.gz", func(t *testing.T) { + t.Parallel() + wantContent := []byte("#!/bin/fake-ipfs-binary") + archive := makeTarGz(t, "kubo/ipfs", wantContent) + + got, err := extractBinaryFromArchive(archive) + require.NoError(t, err) + assert.Equal(t, wantContent, got) + }) + + t.Run("rejects archive without kubo/ipfs entry", func(t *testing.T) { + t.Parallel() + // A valid tar.gz that contains a file at the wrong path. 
+		archive := makeTarGz(t, "wrong-path/ipfs", []byte("binary"))
+
+		_, err := extractBinaryFromArchive(archive)
+		assert.ErrorContains(t, err, "could not find ipfs binary")
+	})
+
+	t.Run("rejects non-archive data", func(t *testing.T) {
+		t.Parallel()
+		_, err := extractBinaryFromArchive([]byte("not an archive"))
+		assert.ErrorContains(t, err, "could not find ipfs binary")
+	})
+}
+
+// makeTarGz creates an in-memory tar.gz archive containing a single file.
+func makeTarGz(t *testing.T, path string, content []byte) []byte {
+	t.Helper()
+	var buf bytes.Buffer
+	gzw := gzip.NewWriter(&buf)
+	tw := tar.NewWriter(gzw)
+	require.NoError(t, tw.WriteHeader(&tar.Header{
+		Name: path,
+		Mode: 0o755,
+		Size: int64(len(content)),
+	}))
+	_, err := tw.Write(content)
+	require.NoError(t, err)
+	require.NoError(t, tw.Close())
+	require.NoError(t, gzw.Close())
+	return buf.Bytes()
+}
+
+// --- Asset name and version helpers ---
+
+// TestAssetNameForPlatformTag ensures the archive filename matches the
+// naming convention used by Kubo's CI release pipeline:
+//
+//	kubo_<tag>_<goos>-<goarch>.<ext>
+func TestAssetNameForPlatformTag(t *testing.T) {
+	t.Parallel()
+	name := assetNameForPlatformTag("v0.41.0")
+	assert.Contains(t, name, fmt.Sprintf("kubo_v0.41.0_%s-%s.", runtime.GOOS, runtime.GOARCH))
+
+	if runtime.GOOS == "windows" {
+		assert.Contains(t, name, ".zip")
+	} else {
+		assert.Contains(t, name, ".tar.gz")
+	}
+}
+
+// TestVersionHelpers exercises the version string utilities used throughout
+// the update command. These handle the mismatch between Go's semver
+// (no "v" prefix) and GitHub's tag convention ("v" prefix).
+func TestVersionHelpers(t *testing.T) { + t.Parallel() + + t.Run("trimVPrefix strips leading v", func(t *testing.T) { + t.Parallel() + assert.Equal(t, "0.41.0", trimVPrefix("v0.41.0")) + assert.Equal(t, "0.41.0", trimVPrefix("0.41.0"), "no-op when v is absent") + }) + + t.Run("normalizeVersion adds v prefix for GitHub tags", func(t *testing.T) { + t.Parallel() + assert.Equal(t, "v0.41.0", normalizeVersion("0.41.0")) + assert.Equal(t, "v0.41.0", normalizeVersion("v0.41.0"), "no-op when v is present") + assert.Equal(t, "v0.41.0", normalizeVersion(" v0.41.0 "), "trims whitespace") + }) + + t.Run("isNewerVersion compares semver correctly", func(t *testing.T) { + t.Parallel() + tests := []struct { + current, target string + wantNewer bool + desc string + }{ + {"0.40.0", "0.41.0", true, "newer minor version"}, + {"0.41.0", "0.40.0", false, "older minor version"}, + {"0.41.0", "0.41.0", false, "same version"}, + {"0.41.0-dev", "0.41.0", true, "release is newer than dev pre-release"}, + } + for _, tt := range tests { + got, err := isNewerVersion(tt.current, tt.target) + require.NoError(t, err) + assert.Equal(t, tt.wantNewer, got, tt.desc) + } + }) +} diff --git a/core/coreapi/coreapi.go b/core/coreapi/coreapi.go index eca9fd989de..f2736a125d3 100644 --- a/core/coreapi/coreapi.go +++ b/core/coreapi/coreapi.go @@ -28,6 +28,7 @@ import ( "github.com/ipfs/kubo/config" coreiface "github.com/ipfs/kubo/core/coreiface" "github.com/ipfs/kubo/core/coreiface/options" + "github.com/ipfs/kubo/internal/fusemount" pubsub "github.com/libp2p/go-libp2p-pubsub" record "github.com/libp2p/go-libp2p-record" ci "github.com/libp2p/go-libp2p/core/crypto" @@ -69,12 +70,11 @@ type CoreAPI struct { ipldPathResolver pathresolver.Resolver unixFSPathResolver pathresolver.Resolver - provider node.DHTProvider - providingStrategy config.ProvideStrategy + provider node.DHTProvider pubSub *pubsub.PubSub - checkPublishAllowed func() error + checkPublishAllowed func(ctx context.Context) error checkOnline 
func(allowOffline bool) error // ONLY for re-applying options in WithOptions, DO NOT USE ANYWHERE ELSE @@ -185,8 +185,7 @@ func (api *CoreAPI) WithOptions(opts ...options.ApiOption) (coreiface.CoreAPI, e ipldPathResolver: n.IPLDPathResolver, unixFSPathResolver: n.UnixFSPathResolver, - provider: n.Provider, - providingStrategy: n.ProvidingStrategy, + provider: n.Provider, pubSub: n.PubSub, @@ -201,7 +200,10 @@ func (api *CoreAPI) WithOptions(opts ...options.ApiOption) (coreiface.CoreAPI, e return nil } - subAPI.checkPublishAllowed = func() error { + subAPI.checkPublishAllowed = func(ctx context.Context) error { + if fusemount.IsPublish(ctx) { + return nil + } if n.Mounts.Ipns != nil && n.Mounts.Ipns.IsActive() { return errors.New("cannot manually publish while IPNS is mounted") } diff --git a/core/coreapi/name.go b/core/coreapi/name.go index 5e7971698e5..2793a4efdd8 100644 --- a/core/coreapi/name.go +++ b/core/coreapi/name.go @@ -28,7 +28,7 @@ func (api *NameAPI) Publish(ctx context.Context, p path.Path, opts ...caopts.Nam ctx, span := tracing.Span(ctx, "CoreAPI.NameAPI", "Publish", trace.WithAttributes(attribute.String("path", p.String()))) defer span.End() - if err := api.checkPublishAllowed(); err != nil { + if err := api.checkPublishAllowed(ctx); err != nil { return ipns.Name{}, err } diff --git a/core/coreapi/object.go b/core/coreapi/object.go index 0f6c2747a57..c7c911ebc0d 100644 --- a/core/coreapi/object.go +++ b/core/coreapi/object.go @@ -2,6 +2,7 @@ package coreapi import ( "context" + "fmt" dag "github.com/ipfs/boxo/ipld/merkledag" "github.com/ipfs/boxo/ipld/merkledag/dagutils" @@ -56,6 +57,37 @@ func (api *ObjectAPI) AddLink(ctx context.Context, base path.Path, name string, return path.ImmutablePath{}, dag.ErrNotProtobuf } + // This command operates at the dag-pb level via dagutils.Editor, which + // only manipulates ProtoNode links without updating UnixFS metadata. + // Only plain UnixFS Directory nodes are safe to mutate this way. 
+ // File nodes: adding links corrupts Blocksizes, content lost on read-back. + // HAMTShard nodes: bitfield not updated, shard trie becomes inconsistent. + // https://specs.ipfs.tech/unixfs/#pbnode-links-name + // https://github.com/ipfs/kubo/issues/7190 + if !options.SkipUnixFSValidation { + fsNode, err := ft.FSNodeFromBytes(basePb.Data()) + if err != nil { + return path.ImmutablePath{}, fmt.Errorf( + "cannot add named links to a non-UnixFS dag-pb node; " + + "pass --allow-non-unixfs to skip validation") + } + switch fsNode.Type() { + case ft.TDirectory: + // plain directories: safe, no link-count metadata to desync + case ft.THAMTShard: + return path.ImmutablePath{}, fmt.Errorf( + "cannot add links to a HAMTShard at the dag-pb level " + + "(would corrupt the HAMT bitfield); use 'ipfs files' " + + "commands instead, or pass --allow-non-unixfs to override") + default: + return path.ImmutablePath{}, fmt.Errorf( + "cannot add named links to a UnixFS %s node, "+ + "only Directory nodes support link addition at the dag-pb level "+ + "(see https://specs.ipfs.tech/unixfs/)", + fsNode.Type()) + } + } + var createfunc func() *dag.ProtoNode if options.Create { createfunc = ft.EmptyDirNode @@ -76,13 +108,18 @@ func (api *ObjectAPI) AddLink(ctx context.Context, base path.Path, name string, return path.FromCid(nnode.Cid()), nil } -func (api *ObjectAPI) RmLink(ctx context.Context, base path.Path, link string) (path.ImmutablePath, error) { +func (api *ObjectAPI) RmLink(ctx context.Context, base path.Path, link string, opts ...caopts.ObjectRmLinkOption) (path.ImmutablePath, error) { ctx, span := tracing.Span(ctx, "CoreAPI.ObjectAPI", "RmLink", trace.WithAttributes( attribute.String("base", base.String()), attribute.String("link", link)), ) defer span.End() + options, err := caopts.ObjectRmLinkOptions(opts...) 
+ if err != nil { + return path.ImmutablePath{}, err + } + baseNd, err := api.core().ResolveNode(ctx, base) if err != nil { return path.ImmutablePath{}, err @@ -93,6 +130,32 @@ func (api *ObjectAPI) RmLink(ctx context.Context, base path.Path, link string) ( return path.ImmutablePath{}, dag.ErrNotProtobuf } + // Same validation as AddLink: dagutils.Editor operates at the dag-pb + // level and cannot update UnixFS metadata (HAMT bitfields, Blocksizes). + if !options.SkipUnixFSValidation { + fsNode, err := ft.FSNodeFromBytes(basePb.Data()) + if err != nil { + return path.ImmutablePath{}, fmt.Errorf( + "cannot remove links from a non-UnixFS dag-pb node; " + + "pass --allow-non-unixfs to skip validation") + } + switch fsNode.Type() { + case ft.TDirectory: + // plain directories: safe, no link-count metadata to desync + case ft.THAMTShard: + return path.ImmutablePath{}, fmt.Errorf( + "cannot remove links from a HAMTShard at the dag-pb level " + + "(would corrupt the HAMT bitfield); use 'ipfs files rm' " + + "instead, or pass --allow-non-unixfs to override") + default: + return path.ImmutablePath{}, fmt.Errorf( + "cannot remove links from a UnixFS %s node, "+ + "only Directory nodes support link removal at the dag-pb level "+ + "(see https://specs.ipfs.tech/unixfs/)", + fsNode.Type()) + } + } + e := dagutils.NewDagEditor(basePb, api.dag) err = e.RmLink(ctx, link) diff --git a/core/coreapi/unixfs.go b/core/coreapi/unixfs.go index 729b4851a34..cd6f42344ee 100644 --- a/core/coreapi/unixfs.go +++ b/core/coreapi/unixfs.go @@ -16,7 +16,6 @@ import ( uio "github.com/ipfs/boxo/ipld/unixfs/io" "github.com/ipfs/boxo/mfs" "github.com/ipfs/boxo/path" - "github.com/ipfs/boxo/provider" cid "github.com/ipfs/go-cid" cidutil "github.com/ipfs/go-cidutil" ds "github.com/ipfs/go-datastore" @@ -28,7 +27,6 @@ import ( options "github.com/ipfs/kubo/core/coreiface/options" "github.com/ipfs/kubo/core/coreunix" "github.com/ipfs/kubo/tracing" - mh "github.com/multiformats/go-multihash" 
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) @@ -110,19 +108,21 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options var dserv ipld.DAGService = merkledag.NewDAGService(bserv) - // wrap the DAGService in a providingDAG service which provides every block written. - // note about strategies: - // - "all" gets handled directly at the blockstore so no need to provide - // - "roots" gets handled in the pinner - // - "mfs" gets handled in mfs - // We need to provide the "pinned" cases only. Added blocks are not - // going to be provided by the blockstore (wrong strategy for that), - // nor by the pinner (the pinner doesn't traverse the pinned DAG itself, it only - // handles roots). This wrapping ensures all blocks of pinned content get provided. - if settings.Pin && !settings.OnlyHash && - (api.providingStrategy&config.ProvideStrategyPinned) != 0 { - dserv = &providingDagService{dserv, api.provider} - } + // Per-block providing for new content is handled outside the add + // pipeline: + // + // - Provide.Strategy=all: every block is provided at the + // blockstore level via the blockstore.Provider hook + // (see core/node/storage.go). + // - Selective strategies (pinned, mfs, +unique, +entities) with + // --fast-provide-dag: ExecuteFastProvideDAG walks the DAG once + // after add completes, applying the active strategy and bloom + // dedup. Wiring lives in core/commands/add.go. + // - --fast-provide-root only (default): the root CID is announced + // immediately via ExecuteFastProvideRoot in the command handler. + // + // The coreapi layer therefore does not wrap the DAGService with + // any providing logic. // add a sync call to the DagService // this ensures that data written to the DagService is persisted to the underlying datastore @@ -147,9 +147,8 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options } // Note: the dag service gets wrapped multiple times: - // 1. 
providingDagService (if pinned strategy) - provides blocks as they're added - // 2. syncDagService - ensures data persistence - // 3. batchingDagService (in coreunix.Adder) - batches operations for efficiency + // 1. syncDagService - ensures data persistence + // 2. batchingDagService (in coreunix.Adder) - batches operations for efficiency fileAdder, err := coreunix.NewAdder(ctx, pinning, addblockstore, syncDserv) if err != nil { @@ -393,39 +392,3 @@ type syncDagService struct { func (s *syncDagService) Sync() error { return s.syncFn() } - -type providingDagService struct { - ipld.DAGService - provider.MultihashProvider -} - -func (pds *providingDagService) Add(ctx context.Context, n ipld.Node) error { - if err := pds.DAGService.Add(ctx, n); err != nil { - return err - } - // Provider errors are logged but not propagated. - // We don't want DAG operations to fail due to providing issues. - // The user's data is still stored successfully even if the - // announcement to the routing system fails temporarily. - if err := pds.StartProviding(false, n.Cid().Hash()); err != nil { - log.Errorf("failed to provide new block: %s", err) - } - return nil -} - -func (pds *providingDagService) AddMany(ctx context.Context, nds []ipld.Node) error { - if err := pds.DAGService.AddMany(ctx, nds); err != nil { - return err - } - keys := make([]mh.Multihash, len(nds)) - for i, n := range nds { - keys[i] = n.Cid().Hash() - } - // Same error handling philosophy as Add(): log but don't fail. 
- if err := pds.StartProviding(false, keys...); err != nil { - log.Errorf("failed to provide new blocks: %s", err) - } - return nil -} - -var _ ipld.DAGService = (*providingDagService)(nil) diff --git a/core/corehttp/commands.go b/core/corehttp/commands.go index bd91406c94a..d8ced5851ac 100644 --- a/core/corehttp/commands.go +++ b/core/corehttp/commands.go @@ -146,9 +146,7 @@ func commandsOption(cctx oldcmds.Context, command *cmds.Command) ServeOption { cmdHandler = withAuthSecrets(authorizations, cmdHandler) } - cmdHandler = otelhttp.NewHandler(cmdHandler, "corehttp.cmdsHandler", - otelhttp.WithMetricAttributesFn(staticServerDomainAttrFn("api")), - ) + cmdHandler = otelhttp.NewHandler(withMetricLabels(cmdHandler, staticServerDomainAttrFn("api")), "corehttp.cmdsHandler") mux.Handle(APIPath+"/", cmdHandler) return mux, nil } diff --git a/core/corehttp/gateway.go b/core/corehttp/gateway.go index 325cebcf69c..43452cdd9b6 100644 --- a/core/corehttp/gateway.go +++ b/core/corehttp/gateway.go @@ -44,11 +44,10 @@ func GatewayOption(paths ...string) ServeOption { handler := gateway.NewHandler(config, backend) handler = gateway.NewHeaders(headers).ApplyCors().Wrap(handler) - var otelOpts []otelhttp.Option if fn := newServerDomainAttrFn(n); fn != nil { - otelOpts = append(otelOpts, otelhttp.WithMetricAttributesFn(fn)) + handler = withMetricLabels(handler, fn) } - handler = otelhttp.NewHandler(handler, "Gateway", otelOpts...) 
+ handler = otelhttp.NewHandler(handler, "Gateway") for _, p := range paths { mux.Handle(p+"/", handler) @@ -75,11 +74,10 @@ func HostnameOption() ServeOption { var handler http.Handler handler = gateway.NewHostnameHandler(config, backend, childMux) handler = gateway.NewHeaders(headers).ApplyCors().Wrap(handler) - var otelOpts []otelhttp.Option if fn := newServerDomainAttrFn(n); fn != nil { - otelOpts = append(otelOpts, otelhttp.WithMetricAttributesFn(fn)) + handler = withMetricLabels(handler, fn) } - handler = otelhttp.NewHandler(handler, "HostnameGateway", otelOpts...) + handler = otelhttp.NewHandler(handler, "HostnameGateway") mux.Handle("/", handler) return childMux, nil @@ -131,9 +129,7 @@ func Libp2pGatewayOption() ServeOption { } handler := gateway.NewHandler(gwConfig, &offlineGatewayErrWrapper{gwimpl: backend}) - handler = otelhttp.NewHandler(handler, "Libp2p-Gateway", - otelhttp.WithMetricAttributesFn(staticServerDomainAttrFn("libp2p")), - ) + handler = otelhttp.NewHandler(withMetricLabels(handler, staticServerDomainAttrFn("libp2p")), "Libp2p-Gateway") mux.Handle("/ipfs/", handler) @@ -272,6 +268,19 @@ var defaultPaths = []string{"/ipfs/", "/ipns/", "/p2p/"} // or "other". var serverDomainAttrKey = attribute.Key("server.domain") +// withMetricLabels wraps a handler so that otelhttp metric attributes are +// added via the request-scoped [otelhttp.Labeler] instead of the deprecated +// [otelhttp.WithMetricAttributesFn] option. The wrapper must run inside +// [otelhttp.NewHandler] (which injects the labeler into the context). +func withMetricLabels(next http.Handler, fn func(*http.Request) []attribute.KeyValue) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if l, ok := otelhttp.LabelerFromContext(r.Context()); ok { + l.Add(fn(r)...) + } + next.ServeHTTP(w, r) + }) +} + // staticServerDomainAttrFn returns a MetricAttributesFn that always returns // a fixed server.domain value. 
Use for handlers where the domain is known // statically (e.g. "api", "libp2p") to keep the label set consistent across @@ -281,7 +290,7 @@ func staticServerDomainAttrFn(domain string) func(*http.Request) []attribute.Key return func(*http.Request) []attribute.KeyValue { return attrs } } -// newServerDomainAttrFn returns an otelhttp.WithMetricAttributesFn callback +// newServerDomainAttrFn returns an attribute callback for [withMetricLabels] // that adds a server.domain attribute grouping requests by their matching // Gateway.PublicGateways hostname suffix (e.g. "dweb.link", "ipfs.io"). // Requests that don't match any configured gateway get "other". diff --git a/core/corehttp/webui.go b/core/corehttp/webui.go index a193bedc355..929308e83e0 100644 --- a/core/corehttp/webui.go +++ b/core/corehttp/webui.go @@ -12,11 +12,12 @@ import ( ) // WebUI version confirmed to work with this Kubo version -const WebUIPath = "/ipfs/bafybeiddnr2jz65byk67sjt6jsu6g7tueddr7odhzzpzli3rgudlbnc6iq" // v4.11.1 +const WebUIPath = "/ipfs/bafybeihxglpcfyarpm7apn7xpezbuoqgk3l5chyk7w4gvrjwk45rqohlmm" // v4.12.0 // WebUIPaths is a list of all past webUI paths. 
var WebUIPaths = []string{ WebUIPath, + "/ipfs/bafybeiddnr2jz65byk67sjt6jsu6g7tueddr7odhzzpzli3rgudlbnc6iq", // v4.11.1 "/ipfs/bafybeidfgbcqy435sdbhhejifdxq4o64tlsezajc272zpyxcsmz47uyc64", // v4.11.0 "/ipfs/bafybeidsjptidvb6wf6benznq2pxgnt5iyksgtecpmjoimlmswhtx2u5ua", // v4.10.0 "/ipfs/bafybeicg7e6o2eszkfdzxg5233gmuip2a7kfzoloh7voyvt2r6ivdet54u", // v4.9.1 diff --git a/core/coreiface/object.go b/core/coreiface/object.go index 27bb8893583..971af78bb29 100644 --- a/core/coreiface/object.go +++ b/core/coreiface/object.go @@ -50,7 +50,7 @@ type ObjectAPI interface { AddLink(ctx context.Context, base path.Path, name string, child path.Path, opts ...options.ObjectAddLinkOption) (path.ImmutablePath, error) // RmLink removes a link from the node - RmLink(ctx context.Context, base path.Path, link string) (path.ImmutablePath, error) + RmLink(ctx context.Context, base path.Path, link string, opts ...options.ObjectRmLinkOption) (path.ImmutablePath, error) // Diff returns a set of changes needed to transform the first object into the // second. diff --git a/core/coreiface/options/object.go b/core/coreiface/options/object.go index ab780ebd988..7942242a084 100644 --- a/core/coreiface/options/object.go +++ b/core/coreiface/options/object.go @@ -1,7 +1,8 @@ package options type ObjectAddLinkSettings struct { - Create bool + Create bool + SkipUnixFSValidation bool } type ( @@ -34,3 +35,43 @@ func (objectOpts) Create(create bool) ObjectAddLinkOption { return nil } } + +// SkipUnixFSValidation is an option for Object.AddLink which skips the check +// that only allows adding named links to UnixFS directory nodes. +// Use this when operating on raw dag-pb nodes outside of UnixFS semantics. 
+func (objectOpts) SkipUnixFSValidation(skip bool) ObjectAddLinkOption { + return func(settings *ObjectAddLinkSettings) error { + settings.SkipUnixFSValidation = skip + return nil + } +} + +type ObjectRmLinkSettings struct { + SkipUnixFSValidation bool +} + +type ( + ObjectRmLinkOption func(*ObjectRmLinkSettings) error +) + +func ObjectRmLinkOptions(opts ...ObjectRmLinkOption) (*ObjectRmLinkSettings, error) { + options := &ObjectRmLinkSettings{} + + for _, opt := range opts { + err := opt(options) + if err != nil { + return nil, err + } + } + return options, nil +} + +// RmLinkSkipUnixFSValidation is an option for Object.RmLink which skips the +// check that only allows removing links from UnixFS directory nodes. +// Use this when operating on raw dag-pb nodes outside of UnixFS semantics. +func (objectOpts) RmLinkSkipUnixFSValidation(skip bool) ObjectRmLinkOption { + return func(settings *ObjectRmLinkSettings) error { + settings.SkipUnixFSValidation = skip + return nil + } +} diff --git a/core/coreiface/options/unixfs.go b/core/coreiface/options/unixfs.go index 11aefa11eb8..4d6fffc680f 100644 --- a/core/coreiface/options/unixfs.go +++ b/core/coreiface/options/unixfs.go @@ -233,8 +233,13 @@ func (unixfsOpts) MaxDirectoryLinks(n int) UnixfsAddOption { } // MaxHAMTFanout specifies the maximum width of the HAMT directory shards. +// Per the UnixFS spec, the value must be a power of 2, minimum 8 +// (for byte-aligned bitfields), and maximum 1024. 
func (unixfsOpts) MaxHAMTFanout(n int) UnixfsAddOption { return func(settings *UnixfsAddSettings) error { + if n < 8 || n&(n-1) != 0 || n > 1024 { + return fmt.Errorf("HAMT fanout must be a power of 2, between 8 and 1024 (got %d)", n) + } settings.MaxHAMTFanout = n settings.MaxHAMTFanoutSet = true return nil diff --git a/core/coreiface/options/unixfs_test.go b/core/coreiface/options/unixfs_test.go new file mode 100644 index 00000000000..0c64d842bb5 --- /dev/null +++ b/core/coreiface/options/unixfs_test.go @@ -0,0 +1,22 @@ +package options + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestMaxHAMTFanoutValidation(t *testing.T) { + valid := []int{8, 16, 32, 64, 128, 256, 512, 1024} + for _, v := range valid { + _, _, err := UnixfsAddOptions(Unixfs.MaxHAMTFanout(v)) + require.NoError(t, err, "fanout %d should be valid", v) + } + + invalid := []int{-1, 0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 12, 24, 48, 100, 2048, 4096, 999999} + for _, v := range invalid { + _, _, err := UnixfsAddOptions(Unixfs.MaxHAMTFanout(v)) + require.Error(t, err, "fanout %d should be invalid", v) + require.Contains(t, err.Error(), "HAMT fanout must be") + } +} diff --git a/core/coreiface/tests/object.go b/core/coreiface/tests/object.go index bbc7180e41a..7fda21237b5 100644 --- a/core/coreiface/tests/object.go +++ b/core/coreiface/tests/object.go @@ -5,6 +5,7 @@ import ( "testing" dag "github.com/ipfs/boxo/ipld/merkledag" + ft "github.com/ipfs/boxo/ipld/unixfs" "github.com/ipfs/boxo/path" ipld "github.com/ipfs/go-ipld-format" iface "github.com/ipfs/kubo/core/coreiface" @@ -22,7 +23,9 @@ func (tp *TestSuite) TestObject(t *testing.T) { t.Run("TestObjectAddLink", tp.TestObjectAddLink) t.Run("TestObjectAddLinkCreate", tp.TestObjectAddLinkCreate) + t.Run("TestObjectAddLinkValidation", tp.TestObjectAddLinkValidation) t.Run("TestObjectRmLink", tp.TestObjectRmLink) + t.Run("TestObjectRmLinkValidation", tp.TestObjectRmLinkValidation) t.Run("TestDiffTest", tp.TestDiffTest) } @@ -58,7 
+61,8 @@ func (tp *TestSuite) TestObjectAddLink(t *testing.T) { }, }) - p3, err := api.Object().AddLink(ctx, p2, "abc", p2) + // Raw dag-pb nodes require SkipUnixFSValidation since they have no UnixFS metadata + p3, err := api.Object().AddLink(ctx, p2, "abc", p2, opt.Object.SkipUnixFSValidation(true)) require.NoError(t, err) nd, err := api.Dag().Get(ctx, p3.RootCid()) @@ -84,10 +88,11 @@ func (tp *TestSuite) TestObjectAddLinkCreate(t *testing.T) { }, }) - _, err = api.Object().AddLink(ctx, p2, "abc/d", p2) + // Raw dag-pb nodes require SkipUnixFSValidation since they have no UnixFS metadata + _, err = api.Object().AddLink(ctx, p2, "abc/d", p2, opt.Object.SkipUnixFSValidation(true)) require.ErrorContains(t, err, "no link by that name") - p3, err := api.Object().AddLink(ctx, p2, "abc/d", p2, opt.Object.Create(true)) + p3, err := api.Object().AddLink(ctx, p2, "abc/d", p2, opt.Object.Create(true), opt.Object.SkipUnixFSValidation(true)) require.NoError(t, err) nd, err := api.Dag().Get(ctx, p3.RootCid()) @@ -99,6 +104,65 @@ func (tp *TestSuite) TestObjectAddLinkCreate(t *testing.T) { require.Equal(t, "bar", links[1].Name) } +// TestObjectAddLinkValidation verifies that AddLink rejects non-directory +// nodes by default, preventing the data-loss bug in +// https://github.com/ipfs/kubo/issues/7190 +func (tp *TestSuite) TestObjectAddLinkValidation(t *testing.T) { + ctx := t.Context() + api, err := tp.makeAPI(t, ctx) + require.NoError(t, err) + + child := putDagPbNode(t, ctx, api, "child", nil) + + // UnixFS Directory: allowed + dirNode := ft.EmptyDirNode() + err = api.Dag().Add(ctx, dirNode) + require.NoError(t, err) + dirPath := path.FromCid(dirNode.Cid()) + + _, err = api.Object().AddLink(ctx, dirPath, "foo", child) + require.NoError(t, err) + + // UnixFS File: rejected (would cause data loss on read-back) + fileNode := ft.EmptyFileNode() + err = api.Dag().Add(ctx, fileNode) + require.NoError(t, err) + filePath := path.FromCid(fileNode.Cid()) + + _, err = 
api.Object().AddLink(ctx, filePath, "foo", child) + require.ErrorContains(t, err, "cannot add named links to a UnixFS File node, only Directory nodes support link addition at the dag-pb level") + + // UnixFS File with SkipUnixFSValidation: allowed (user takes responsibility) + _, err = api.Object().AddLink(ctx, filePath, "foo", child, opt.Object.SkipUnixFSValidation(true)) + require.NoError(t, err) + + // HAMTShard: rejected (dag-pb level mutation corrupts HAMT bitfield) + hamtData, err := ft.HAMTShardData(nil, 256, 0x22) + require.NoError(t, err) + hamtNode := new(dag.ProtoNode) + hamtNode.SetData(hamtData) + err = api.Dag().Add(ctx, hamtNode) + require.NoError(t, err) + hamtPath := path.FromCid(hamtNode.Cid()) + + _, err = api.Object().AddLink(ctx, hamtPath, "foo", child) + require.ErrorContains(t, err, "cannot add links to a HAMTShard at the dag-pb level (would corrupt the HAMT bitfield); use 'ipfs files' commands instead, or pass --allow-non-unixfs to override") + + // HAMTShard with SkipUnixFSValidation: allowed + _, err = api.Object().AddLink(ctx, hamtPath, "foo", child, opt.Object.SkipUnixFSValidation(true)) + require.NoError(t, err) + + // Raw dag-pb (no UnixFS data): rejected + rawPb := putDagPbNode(t, ctx, api, "", nil) + + _, err = api.Object().AddLink(ctx, rawPb, "foo", child) + require.ErrorContains(t, err, "cannot add named links to a non-UnixFS dag-pb node; pass --allow-non-unixfs to skip validation") + + // Raw dag-pb with SkipUnixFSValidation: allowed + _, err = api.Object().AddLink(ctx, rawPb, "foo", child, opt.Object.SkipUnixFSValidation(true)) + require.NoError(t, err) +} + func (tp *TestSuite) TestObjectRmLink(t *testing.T) { ctx := t.Context() api, err := tp.makeAPI(t, ctx) @@ -113,7 +177,8 @@ func (tp *TestSuite) TestObjectRmLink(t *testing.T) { }, }) - p3, err := api.Object().RmLink(ctx, p2, "bar") + // Raw dag-pb nodes require SkipUnixFSValidation since they have no UnixFS metadata + p3, err := api.Object().RmLink(ctx, p2, "bar", 
opt.Object.RmLinkSkipUnixFSValidation(true)) require.NoError(t, err) nd, err := api.Dag().Get(ctx, p3.RootCid()) @@ -123,6 +188,69 @@ func (tp *TestSuite) TestObjectRmLink(t *testing.T) { require.Len(t, links, 0) } +// TestObjectRmLinkValidation verifies that RmLink rejects non-directory +// nodes by default, preventing silent DAG corruption. +func (tp *TestSuite) TestObjectRmLinkValidation(t *testing.T) { + ctx := t.Context() + api, err := tp.makeAPI(t, ctx) + require.NoError(t, err) + + child := putDagPbNode(t, ctx, api, "child", nil) + + // UnixFS Directory with a link: rm-link allowed + dirNode := ft.EmptyDirNode() + childNd, err := api.Dag().Get(ctx, child.RootCid()) + require.NoError(t, err) + err = dirNode.AddNodeLink("foo", childNd) + require.NoError(t, err) + err = api.Dag().Add(ctx, dirNode) + require.NoError(t, err) + dirPath := path.FromCid(dirNode.Cid()) + + _, err = api.Object().RmLink(ctx, dirPath, "foo") + require.NoError(t, err) + + // UnixFS File: rejected + fileNode := ft.EmptyFileNode() + err = api.Dag().Add(ctx, fileNode) + require.NoError(t, err) + filePath := path.FromCid(fileNode.Cid()) + + _, err = api.Object().RmLink(ctx, filePath, "foo") + require.ErrorContains(t, err, "cannot remove links from a UnixFS File node, only Directory nodes support link removal at the dag-pb level") + + // UnixFS File with SkipUnixFSValidation: allowed + _, err = api.Object().RmLink(ctx, filePath, "foo", opt.Object.RmLinkSkipUnixFSValidation(true)) + // ErrLinkNotFound is expected since the file has no links, but validation passed + require.ErrorContains(t, err, "no link by that name") + + // HAMTShard: rejected + hamtData, err := ft.HAMTShardData(nil, 256, 0x22) + require.NoError(t, err) + hamtNode := new(dag.ProtoNode) + hamtNode.SetData(hamtData) + err = api.Dag().Add(ctx, hamtNode) + require.NoError(t, err) + hamtPath := path.FromCid(hamtNode.Cid()) + + _, err = api.Object().RmLink(ctx, hamtPath, "foo") + require.ErrorContains(t, err, "cannot remove links 
from a HAMTShard at the dag-pb level (would corrupt the HAMT bitfield); use 'ipfs files rm' instead, or pass --allow-non-unixfs to override") + + // HAMTShard with SkipUnixFSValidation: allowed (validation bypassed) + _, err = api.Object().RmLink(ctx, hamtPath, "foo", opt.Object.RmLinkSkipUnixFSValidation(true)) + require.ErrorContains(t, err, "no link by that name") + + // Raw dag-pb (no UnixFS data): rejected + rawPb := putDagPbNode(t, ctx, api, "", nil) + + _, err = api.Object().RmLink(ctx, rawPb, "foo") + require.ErrorContains(t, err, "cannot remove links from a non-UnixFS dag-pb node; pass --allow-non-unixfs to skip validation") + + // Raw dag-pb with SkipUnixFSValidation: allowed + _, err = api.Object().RmLink(ctx, rawPb, "foo", opt.Object.RmLinkSkipUnixFSValidation(true)) + require.ErrorContains(t, err, "no link by that name") +} + func (tp *TestSuite) TestDiffTest(t *testing.T) { ctx := t.Context() api, err := tp.makeAPI(t, ctx) diff --git a/core/coreunix/add.go b/core/coreunix/add.go index 55ad1bd83bf..dada26fe273 100644 --- a/core/coreunix/add.go +++ b/core/coreunix/add.go @@ -107,12 +107,7 @@ func (adder *Adder) mfsRoot() (*mfs.Root, error) { } // Note, this adds it to DAGService already. - mr, err := mfs.NewEmptyRoot(adder.ctx, adder.dagService, nil, nil, mfs.MkdirOpts{ - CidBuilder: adder.CidBuilder, - MaxLinks: adder.MaxDirectoryLinks, - MaxHAMTFanout: adder.MaxHAMTFanout, - SizeEstimationMode: adder.SizeEstimationMode, - }) + mr, err := mfs.NewEmptyRoot(adder.ctx, adder.dagService, nil, nil, adder.mkdirOpts()...) if err != nil { return nil, err } @@ -125,6 +120,20 @@ func (adder *Adder) SetMfsRoot(r *mfs.Root) { adder.mroot = r } +// mkdirOpts returns MFS options derived from the adder's config, +// with any additional options appended. 
+func (adder *Adder) mkdirOpts(extra ...mfs.Option) []mfs.Option { + opts := []mfs.Option{ + mfs.WithCidBuilder(adder.CidBuilder), + mfs.WithMaxLinks(adder.MaxDirectoryLinks), + mfs.WithMaxHAMTFanout(adder.MaxHAMTFanout), + } + if adder.SizeEstimationMode != nil { + opts = append(opts, mfs.WithSizeEstimationMode(*adder.SizeEstimationMode)) + } + return append(opts, extra...) +} + // Constructs a node from reader's data, and adds it. Doesn't pin. func (adder *Adder) add(reader io.Reader) (ipld.Node, error) { chnk, err := chunker.FromString(reader, adder.Chunker) @@ -274,15 +283,8 @@ func (adder *Adder) addNode(node ipld.Node, path string) error { dir := gopath.Dir(path) if dir != "." { - opts := mfs.MkdirOpts{ - Mkparents: true, - Flush: false, - CidBuilder: adder.CidBuilder, - MaxLinks: adder.MaxDirectoryLinks, - MaxHAMTFanout: adder.MaxHAMTFanout, - SizeEstimationMode: adder.SizeEstimationMode, - } - if err := mfs.Mkdir(mr, dir, opts); err != nil { + mkdirOpts := adder.mkdirOpts() + if err := mfs.Mkdir(mr, dir, mfs.MkdirOpts{Mkparents: true, Flush: false}, mkdirOpts...); err != nil { return err } } @@ -506,15 +508,8 @@ func (adder *Adder) addDir(ctx context.Context, path string, dir files.Directory // if we need to store mode or modification time then create a new root which includes that data if toplevel && (adder.FileMode != 0 || !adder.FileMtime.IsZero()) { - mr, err := mfs.NewEmptyRoot(ctx, adder.dagService, nil, nil, - mfs.MkdirOpts{ - CidBuilder: adder.CidBuilder, - MaxLinks: adder.MaxDirectoryLinks, - MaxHAMTFanout: adder.MaxHAMTFanout, - ModTime: adder.FileMtime, - Mode: adder.FileMode, - SizeEstimationMode: adder.SizeEstimationMode, - }) + opts := adder.mkdirOpts(mfs.WithMode(adder.FileMode), mfs.WithModTime(adder.FileMtime)) + mr, err := mfs.NewEmptyRoot(ctx, adder.dagService, nil, nil, opts...) 
if err != nil { return err } @@ -526,16 +521,8 @@ func (adder *Adder) addDir(ctx context.Context, path string, dir files.Directory if err != nil { return err } - err = mfs.Mkdir(mr, path, mfs.MkdirOpts{ - Mkparents: true, - Flush: false, - CidBuilder: adder.CidBuilder, - Mode: adder.FileMode, - ModTime: adder.FileMtime, - MaxLinks: adder.MaxDirectoryLinks, - MaxHAMTFanout: adder.MaxHAMTFanout, - SizeEstimationMode: adder.SizeEstimationMode, - }) + mkdirOpts := adder.mkdirOpts(mfs.WithMode(adder.FileMode), mfs.WithModTime(adder.FileMtime)) + err = mfs.Mkdir(mr, path, mfs.MkdirOpts{Mkparents: true, Flush: false}, mkdirOpts...) if err != nil { return err } diff --git a/core/node/core.go b/core/node/core.go index 0b2af81c399..f3f8e84a5bb 100644 --- a/core/node/core.go +++ b/core/node/core.go @@ -52,10 +52,7 @@ func BlockService(cfg *config.Config) func(lc fx.Lifecycle, bs blockstore.Blocks // Pinning creates new pinner which tells GC which blocks should be kept func Pinning(strategy string) func(bstore blockstore.Blockstore, ds format.DAGService, repo repo.Repo, prov DHTProvider) (pin.Pinner, error) { - // Parse strategy at function creation time (not inside the returned function) - // This happens before the provider is created, which is why we pass the strategy - // string and parse it here, rather than using fx-provided ProvidingStrategy. - strategyFlag := config.ParseProvideStrategy(strategy) + strategyFlag := config.MustParseProvideStrategy(strategy) return func(bstore blockstore.Blockstore, ds format.DAGService, @@ -238,7 +235,7 @@ func Files(strategy string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo // strategy - it ensures all MFS content gets announced as it's added or // modified. For non-mfs strategies, we set provider to nil to avoid // unnecessary providing. 
- strategyFlag := config.ParseProvideStrategy(strategy) + strategyFlag := config.MustParseProvideStrategy(strategy) if strategyFlag&config.ProvideStrategyMFS == 0 { prov = nil } @@ -248,19 +245,12 @@ func Files(strategy string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo if err != nil { return nil, fmt.Errorf("failed to get config: %w", err) } - chunkerGen := cfg.Import.UnixFSSplitterFunc() - maxDirLinks := int(cfg.Import.UnixFSDirectoryMaxLinks.WithDefault(config.DefaultUnixFSDirectoryMaxLinks)) - maxHAMTFanout := int(cfg.Import.UnixFSHAMTDirectoryMaxFanout.WithDefault(config.DefaultUnixFSHAMTDirectoryMaxFanout)) - hamtShardingSize := int(cfg.Import.UnixFSHAMTDirectorySizeThreshold.WithDefault(config.DefaultUnixFSHAMTDirectorySizeThreshold)) - sizeEstimationMode := cfg.Import.HAMTSizeEstimationMode() - - root, err := mfs.NewRoot(ctx, dag, nd, pf, prov, - mfs.WithChunker(chunkerGen), - mfs.WithMaxLinks(maxDirLinks), - mfs.WithMaxHAMTFanout(maxHAMTFanout), - mfs.WithHAMTShardingSize(hamtShardingSize), - mfs.WithSizeEstimationMode(sizeEstimationMode), - ) + mfsOpts, err := cfg.Import.MFSRootOptions() + if err != nil { + return nil, fmt.Errorf("failed to build MFS options from Import config: %w", err) + } + + root, err := mfs.NewRoot(ctx, dag, nd, pf, prov, mfsOpts...) if err != nil { return nil, fmt.Errorf("failed to initialize MFS root from %s stored at %s: %w. 
"+ "If corrupted, use 'ipfs files chroot' to reset (see --help)", nd.Cid(), FilesRootDatastoreKey, err) diff --git a/core/node/groups.go b/core/node/groups.go index ab497e33b57..d9c7f1ffb6f 100644 --- a/core/node/groups.go +++ b/core/node/groups.go @@ -243,7 +243,9 @@ func Storage(bcfg *BuildCfg, cfg *config.Config) fx.Option { finalBstore := fx.Provide(GcBlockstoreCtor) if cfg.Experimental.FilestoreEnabled || cfg.Experimental.UrlstoreEnabled { - finalBstore = fx.Provide(FilestoreBlockstoreCtor) + finalBstore = fx.Provide(FilestoreBlockstoreCtor( + cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy), + )) } return fx.Options( diff --git a/core/node/libp2p/fd/sys_not_unix.go b/core/node/libp2p/fd/sys_not_unix.go index c857987480d..1358d64dd8b 100644 --- a/core/node/libp2p/fd/sys_not_unix.go +++ b/core/node/libp2p/fd/sys_not_unix.go @@ -1,3 +1,4 @@ +// Stub returning zero on platforms without /proc or Handle APIs. //go:build !linux && !darwin && !windows package fd diff --git a/core/node/libp2p/fd/sys_unix.go b/core/node/libp2p/fd/sys_unix.go index dcb82a8815e..c9d5317a5a3 100644 --- a/core/node/libp2p/fd/sys_unix.go +++ b/core/node/libp2p/fd/sys_unix.go @@ -1,3 +1,4 @@ +// File descriptor counting via /proc/self/fd (linux) or lsof (darwin). //go:build linux || darwin package fd diff --git a/core/node/libp2p/fd/sys_windows.go b/core/node/libp2p/fd/sys_windows.go index eec17f3883f..389ad127b2e 100644 --- a/core/node/libp2p/fd/sys_windows.go +++ b/core/node/libp2p/fd/sys_windows.go @@ -1,3 +1,4 @@ +// File descriptor counting via Windows Handle API. 
//go:build windows package fd diff --git a/core/node/libp2p/routing.go b/core/node/libp2p/routing.go index b3a4610ed15..305ca903380 100644 --- a/core/node/libp2p/routing.go +++ b/core/node/libp2p/routing.go @@ -122,7 +122,8 @@ func BaseRouting(cfg *config.Config) any { // we want to also use the default HTTP routers, so wrap the FullRT client // in a parallel router that calls them in parallel - httpRouters, err := constructDefaultHTTPRouters(cfg) + addrFunc := httpRouterAddrFunc(in.Host, cfg.Addresses) + httpRouters, err := constructDefaultHTTPRouters(cfg, addrFunc) if err != nil { return out, err } diff --git a/core/node/libp2p/routingopt.go b/core/node/libp2p/routingopt.go index c8f22af2f01..e9ba01494c3 100644 --- a/core/node/libp2p/routingopt.go +++ b/core/node/libp2p/routingopt.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "os" + "slices" "strings" "time" @@ -18,6 +19,8 @@ import ( host "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" routing "github.com/libp2p/go-libp2p/core/routing" + basichost "github.com/libp2p/go-libp2p/p2p/host/basic" + ma "github.com/multiformats/go-multiaddr" ) type RoutingOptionArgs struct { @@ -105,7 +108,7 @@ func collectAllEndpoints(cfg *config.Config) []EndpointSource { return endpoints } -func constructDefaultHTTPRouters(cfg *config.Config) ([]*routinghelpers.ParallelRouter, error) { +func constructDefaultHTTPRouters(cfg *config.Config, addrFunc func() []ma.Multiaddr) ([]*routinghelpers.ParallelRouter, error) { var routers []*routinghelpers.ParallelRouter httpRetrievalEnabled := cfg.HTTPRetrieval.Enabled.WithDefault(config.DefaultHTTPRetrievalEnabled) @@ -130,7 +133,7 @@ func constructDefaultHTTPRouters(cfg *config.Config) ([]*routinghelpers.Parallel // Create single HTTP router and composer per origin for baseURL, capabilities := range originCapabilities { // Construct HTTP router using base URL (without path) - httpRouter, err := irouting.ConstructHTTPRouter(baseURL, cfg.Identity.PeerID, 
httpAddrsFromConfig(cfg.Addresses), cfg.Identity.PrivKey, httpRetrievalEnabled) + httpRouter, err := irouting.ConstructHTTPRouter(baseURL, cfg.Identity.PeerID, addrFunc, cfg.Identity.PrivKey, httpRetrievalEnabled) if err != nil { return nil, err } @@ -191,7 +194,8 @@ func ConstructDelegatedOnlyRouting(cfg *config.Config) RoutingOption { var routers []*routinghelpers.ParallelRouter // Add HTTP delegated routers (includes both router and publisher capabilities) - httpRouters, err := constructDefaultHTTPRouters(cfg) + addrFunc := httpRouterAddrFunc(args.Host, cfg.Addresses) + httpRouters, err := constructDefaultHTTPRouters(cfg, addrFunc) if err != nil { return nil, err } @@ -225,7 +229,8 @@ func ConstructDefaultRouting(cfg *config.Config, routingOpt RoutingOption) Routi ExecuteAfter: 0, }) - httpRouters, err := constructDefaultHTTPRouters(cfg) + addrFunc := httpRouterAddrFunc(args.Host, cfg.Addresses) + httpRouters, err := constructDefaultHTTPRouters(cfg, addrFunc) if err != nil { return nil, err } @@ -255,22 +260,37 @@ func constructDHTRouting(mode dht.ModeOpt) RoutingOption { wanOptions := []dht.Option{ dht.BootstrapPeers(args.BootstrapPeers...), } + // In stub mode, allow loopback peers in the WAN routing + // table so Provide/PutValue work with ephemeral test peers. 
+ if os.Getenv("TEST_DHT_STUB") != "" { + wanOptions = append(wanOptions, + dht.AddressFilter(nil), + dht.QueryFilter(func(_ any, _ peer.AddrInfo) bool { return true }), + dht.RoutingTableFilter(func(_ any, _ peer.ID) bool { return true }), + dht.RoutingTablePeerDiversityFilter(nil), + ) + } lanOptions := []dht.Option{} if args.LoopbackAddressesOnLanDHT { lanOptions = append(lanOptions, dht.AddressFilter(nil)) } - return dual.New( + d, err := dual.New( args.Ctx, args.Host, dual.DHTOption(dhtOpts...), dual.WanDHTOption(wanOptions...), dual.LanDHTOption(lanOptions...), ) + if err != nil { + return nil, err + } + return d, nil } } // ConstructDelegatedRouting is used when Routing.Type = "custom" func ConstructDelegatedRouting(routers config.Routers, methods config.Methods, peerID string, addrs config.Addresses, privKey string, httpRetrieval bool) RoutingOption { return func(args RoutingOptionArgs) (routing.Routing, error) { + addrFunc := httpRouterAddrFunc(args.Host, addrs) return irouting.Parse(routers, methods, &irouting.ExtraDHTParams{ BootstrapPeers: args.BootstrapPeers, @@ -281,7 +301,7 @@ func ConstructDelegatedRouting(routers config.Routers, methods config.Methods, p }, &irouting.ExtraHTTPParams{ PeerID: peerID, - Addrs: httpAddrsFromConfig(addrs), + AddrFunc: addrFunc, PrivKeyB64: privKey, HTTPRetrieval: httpRetrieval, }, @@ -300,30 +320,78 @@ var ( NilRouterOption = constructNilRouting ) -// httpAddrsFromConfig creates a list of addresses from the provided configuration to be used by HTTP delegated routers. -func httpAddrsFromConfig(cfgAddrs config.Addresses) []string { - // Swarm addrs are announced by default - addrs := cfgAddrs.Swarm - // if Announce addrs are specified - override Swarm +// confirmedAddrsHost matches libp2p hosts that support AutoNAT V2 address confirmation. +type confirmedAddrsHost interface { + ConfirmedAddrs() (reachable, unreachable, unknown []ma.Multiaddr) +} + +// Compile-time check: BasicHost must satisfy confirmedAddrsHost. 
+// ConfirmedAddrs is not part of the core host.Host interface and is marked +// experimental in go-libp2p. If BasicHost ever drops or changes this method, +// this assertion will fail at build time. In that case, update +// httpRouterAddrFunc (this file) and the swarm autonat command +// (core/commands/swarm_addrs_autonat.go) which both type-assert to this +// interface. +var _ confirmedAddrsHost = (*basichost.BasicHost)(nil) + +// httpRouterAddrFunc returns a function that resolves provider addresses for +// HTTP routers at provide-time. +// +// Resolution logic: +// - If Announce is set, use it as a static override (no dynamic resolution). +// - Otherwise, prefer AutoNAT V2 confirmed reachable addresses when available, +// falling back to static Swarm addresses (filtered by NoAnnounce). +// - AppendAnnounce addresses are always appended. +func httpRouterAddrFunc(h host.Host, cfgAddrs config.Addresses) func() []ma.Multiaddr { + appendAddrs := parseMultiaddrs(cfgAddrs.AppendAnnounce) + + // If Announce is explicitly set, use it as a static override. if len(cfgAddrs.Announce) > 0 { - addrs = cfgAddrs.Announce - } else if len(cfgAddrs.NoAnnounce) > 0 { - // if Announce adds are not specified - filter Swarm addrs with NoAnnounce list - maddrs := map[string]struct{}{} - for _, addr := range addrs { - maddrs[addr] = struct{}{} + staticAddrs := slices.Concat(parseMultiaddrs(cfgAddrs.Announce), appendAddrs) + return func() []ma.Multiaddr { return staticAddrs } + } + + // Precompute fallback: Swarm minus NoAnnounce plus AppendAnnounce. 
+ fallbackStrs := cfgAddrs.Swarm + if len(cfgAddrs.NoAnnounce) > 0 { + noAnnounce := map[string]struct{}{} + for _, a := range cfgAddrs.NoAnnounce { + noAnnounce[a] = struct{}{} } - for _, addr := range cfgAddrs.NoAnnounce { - delete(maddrs, addr) + filtered := make([]string, 0, len(fallbackStrs)) + for _, a := range fallbackStrs { + if _, skip := noAnnounce[a]; !skip { + filtered = append(filtered, a) + } } - addrs = make([]string, 0, len(maddrs)) - for k := range maddrs { - addrs = append(addrs, k) + fallbackStrs = filtered + } + fallbackResult := slices.Concat(parseMultiaddrs(fallbackStrs), appendAddrs) + + ch, hasConfirmed := h.(confirmedAddrsHost) + return func() []ma.Multiaddr { + if hasConfirmed { + reachable, _, _ := ch.ConfirmedAddrs() + if len(reachable) > 0 { + if len(appendAddrs) == 0 { + return reachable + } + return slices.Concat(reachable, appendAddrs) + } } + return fallbackResult } - // append AppendAnnounce addrs to the result list - if len(cfgAddrs.AppendAnnounce) > 0 { - addrs = append(addrs, cfgAddrs.AppendAnnounce...) 
+} + +func parseMultiaddrs(strs []string) []ma.Multiaddr { + addrs := make([]ma.Multiaddr, 0, len(strs)) + for _, s := range strs { + a, err := ma.NewMultiaddr(s) + if err != nil { + log.Errorf("ignoring invalid multiaddr %q: %s", s, err) + continue + } + addrs = append(addrs, a) } return addrs } diff --git a/core/node/libp2p/routingopt_test.go b/core/node/libp2p/routingopt_test.go index 1a06045d987..3342170a533 100644 --- a/core/node/libp2p/routingopt_test.go +++ b/core/node/libp2p/routingopt_test.go @@ -1,40 +1,22 @@ package libp2p import ( + "context" "testing" "github.com/ipfs/boxo/autoconf" config "github.com/ipfs/kubo/config" + "github.com/libp2p/go-libp2p/core/connmgr" + "github.com/libp2p/go-libp2p/core/event" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/protocol" + ma "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestHttpAddrsFromConfig(t *testing.T) { - require.Equal(t, []string{"/ip4/0.0.0.0/tcp/4001", "/ip4/0.0.0.0/udp/4001/quic-v1"}, - httpAddrsFromConfig(config.Addresses{ - Swarm: []string{"/ip4/0.0.0.0/tcp/4001", "/ip4/0.0.0.0/udp/4001/quic-v1"}, - }), "Swarm addrs should be taken by default") - - require.Equal(t, []string{"/ip4/192.168.0.1/tcp/4001"}, - httpAddrsFromConfig(config.Addresses{ - Swarm: []string{"/ip4/0.0.0.0/tcp/4001", "/ip4/0.0.0.0/udp/4001/quic-v1"}, - Announce: []string{"/ip4/192.168.0.1/tcp/4001"}, - }), "Announce addrs should override Swarm if specified") - - require.Equal(t, []string{"/ip4/0.0.0.0/udp/4001/quic-v1"}, - httpAddrsFromConfig(config.Addresses{ - Swarm: []string{"/ip4/0.0.0.0/tcp/4001", "/ip4/0.0.0.0/udp/4001/quic-v1"}, - NoAnnounce: []string{"/ip4/0.0.0.0/tcp/4001"}, - }), "Swarm addrs should not contain NoAnnounce addrs") - - require.Equal(t, []string{"/ip4/192.168.0.1/tcp/4001", "/ip4/192.168.0.2/tcp/4001"}, - 
httpAddrsFromConfig(config.Addresses{ - Swarm: []string{"/ip4/0.0.0.0/tcp/4001", "/ip4/0.0.0.0/udp/4001/quic-v1"}, - Announce: []string{"/ip4/192.168.0.1/tcp/4001"}, - AppendAnnounce: []string{"/ip4/192.168.0.2/tcp/4001"}, - }), "AppendAnnounce addrs should be included if specified") -} - func TestDetermineCapabilities(t *testing.T) { tests := []struct { name string @@ -222,3 +204,87 @@ func TestEndpointCapabilitiesReadWriteLogic(t *testing.T) { assert.False(t, capabilities.IPNSPut) }) } + +// stubHost is a minimal host.Host stub for testing httpRouterAddrFunc. +// Only the methods checked via type assertion (confirmedAddrsHost) matter; +// all other methods panic if called. +type stubHost struct { + reachable []ma.Multiaddr +} + +func (h *stubHost) ConfirmedAddrs() (reachable, unreachable, unknown []ma.Multiaddr) { + return h.reachable, nil, nil +} + +func (h *stubHost) ID() peer.ID { panic("unused") } +func (h *stubHost) Addrs() []ma.Multiaddr { panic("unused") } +func (h *stubHost) Peerstore() peerstore.Peerstore { panic("unused") } +func (h *stubHost) Network() network.Network { panic("unused") } +func (h *stubHost) Mux() protocol.Switch { panic("unused") } +func (h *stubHost) Connect(context.Context, peer.AddrInfo) error { panic("unused") } +func (h *stubHost) SetStreamHandler(protocol.ID, network.StreamHandler) { panic("unused") } +func (h *stubHost) SetStreamHandlerMatch(protocol.ID, func(protocol.ID) bool, network.StreamHandler) { + panic("unused") +} +func (h *stubHost) RemoveStreamHandler(protocol.ID) { panic("unused") } +func (h *stubHost) NewStream(context.Context, peer.ID, ...protocol.ID) (network.Stream, error) { + panic("unused") +} +func (h *stubHost) Close() error { panic("unused") } +func (h *stubHost) ConnManager() connmgr.ConnManager { panic("unused") } +func (h *stubHost) EventBus() event.Bus { panic("unused") } + +func TestHttpRouterAddrFunc(t *testing.T) { + tests := []struct { + name string + reachable []string // autonat confirmed addrs 
(nil = none) + cfg config.Addresses + want []string + }{ + { + name: "prefers autonat confirmed reachable addrs over swarm fallback", + reachable: []string{"/ip4/1.2.3.4/tcp/4001", "/ip4/1.2.3.4/udp/4001/quic-v1"}, + cfg: config.Addresses{Swarm: []string{"/ip4/0.0.0.0/tcp/4001", "/ip4/0.0.0.0/udp/4001/quic-v1"}}, + want: []string{"/ip4/1.2.3.4/tcp/4001", "/ip4/1.2.3.4/udp/4001/quic-v1"}, + }, + { + name: "falls back to swarm when autonat has no confirmed addrs", + cfg: config.Addresses{Swarm: []string{"/ip4/0.0.0.0/tcp/4001"}}, + want: []string{"/ip4/0.0.0.0/tcp/4001"}, + }, + { + name: "Announce overrides autonat and swarm", + reachable: []string{"/ip4/1.2.3.4/tcp/4001"}, + cfg: config.Addresses{Swarm: []string{"/ip4/0.0.0.0/tcp/4001"}, Announce: []string{"/ip4/5.6.7.8/tcp/4001"}}, + want: []string{"/ip4/5.6.7.8/tcp/4001"}, + }, + { + name: "AppendAnnounce added to autonat addrs", + reachable: []string{"/ip4/1.2.3.4/tcp/4001"}, + cfg: config.Addresses{Swarm: []string{"/ip4/0.0.0.0/tcp/4001"}, AppendAnnounce: []string{"/ip4/10.0.0.1/tcp/4001"}}, + want: []string{"/ip4/1.2.3.4/tcp/4001", "/ip4/10.0.0.1/tcp/4001"}, + }, + { + name: "AppendAnnounce added to swarm fallback", + cfg: config.Addresses{Swarm: []string{"/ip4/0.0.0.0/tcp/4001"}, AppendAnnounce: []string{"/ip4/10.0.0.1/tcp/4001"}}, + want: []string{"/ip4/0.0.0.0/tcp/4001", "/ip4/10.0.0.1/tcp/4001"}, + }, + { + name: "NoAnnounce filters swarm fallback", + cfg: config.Addresses{Swarm: []string{"/ip4/0.0.0.0/tcp/4001", "/ip4/0.0.0.0/udp/4001/quic-v1"}, NoAnnounce: []string{"/ip4/0.0.0.0/tcp/4001"}}, + want: []string{"/ip4/0.0.0.0/udp/4001/quic-v1"}, + }, + { + name: "AppendAnnounce added to Announce", + cfg: config.Addresses{Swarm: []string{"/ip4/0.0.0.0/tcp/4001"}, Announce: []string{"/ip4/5.6.7.8/tcp/4001"}, AppendAnnounce: []string{"/ip4/10.0.0.1/tcp/4001"}}, + want: []string{"/ip4/5.6.7.8/tcp/4001", "/ip4/10.0.0.1/tcp/4001"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + h := 
&stubHost{reachable: parseMultiaddrs(tt.reachable)} + fn := httpRouterAddrFunc(h, tt.cfg) + assert.Equal(t, parseMultiaddrs(tt.want), fn()) + }) + } +} diff --git a/core/node/provider.go b/core/node/provider.go index fba012422d6..04b14b235f6 100644 --- a/core/node/provider.go +++ b/core/node/provider.go @@ -2,11 +2,15 @@ package node import ( "context" + "encoding/binary" "errors" "fmt" + "os" + "path/filepath" "time" "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/dag/walker" "github.com/ipfs/boxo/fetcher" "github.com/ipfs/boxo/mfs" pin "github.com/ipfs/boxo/pinning/pinner" @@ -14,11 +18,13 @@ import ( "github.com/ipfs/boxo/provider" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/mount" "github.com/ipfs/go-datastore/namespace" "github.com/ipfs/go-datastore/query" log "github.com/ipfs/go-log/v2" "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/repo" + "github.com/ipfs/kubo/repo/fsrepo" irouting "github.com/ipfs/kubo/routing" dht "github.com/libp2p/go-libp2p-kad-dht" "github.com/libp2p/go-libp2p-kad-dht/amino" @@ -48,14 +54,35 @@ const ( // Datastore key used to store previous reprovide strategy. reprovideStrategyKey = "/reprovideStrategy" - // Datastore namespace prefix for provider data. - providerDatastorePrefix = "provider" - // Datastore path for the provider keystore. - keystoreDatastorePath = "keystore" + // KeystoreDatastorePath is the base directory for the provider keystore datastores. + KeystoreDatastorePath = "provider-keystore" + + // reprovideLastUniqueCountKey stores the unique CID count from + // the last +unique reprovide cycle, used to size the next cycle's + // bloom filter. + reprovideLastUniqueCountKey = "/reprovideLastUniqueCount" +) + +var ( + // Datastore namespace key for provider data. + providerDatastoreKey = datastore.NewKey("provider") + // Datastore namespace key for provider keystore data. 
+ keystoreDatastoreKey = datastore.NewKey("keystore") ) var errAcceleratedDHTNotReady = errors.New("AcceleratedDHTClient: routing table not ready") +// validateKeystoreSuffix rejects any suffix other than "0" or "1". +// The upstream library uses these two values as alternating namespace +// identifiers. Validating here prevents accidental deletion of unrelated +// directories via os.RemoveAll if the upstream ever changes its scheme. +func validateKeystoreSuffix(suffix string) error { + if suffix != "0" && suffix != "1" { + return fmt.Errorf("unexpected keystore suffix %q, expected \"0\" or \"1\"", suffix) + } + return nil +} + // Interval between reprovide queue monitoring checks for slow reprovide alerts. // Used when Provide.DHT.SweepEnabled=true const reprovideAlertPollInterval = 15 * time.Minute @@ -369,19 +396,297 @@ type addrsFilter interface { FilteredAddrs() []ma.Multiaddr } +// findRootDatastoreSpec extracts the leaf datastore spec for the root ("/") +// mount from the repo's Datastore.Spec config. It unwraps mount (picks the "/" +// mountpoint), measure, and log wrappers to find the actual backend spec +// (e.g., levelds, pebbleds). +func findRootDatastoreSpec(spec map[string]any) map[string]any { + if spec == nil { + return nil + } + switch spec["type"] { + case "mount": + mounts, ok := spec["mounts"].([]any) + if !ok { + return spec + } + for _, m := range mounts { + mnt, ok := m.(map[string]any) + if !ok { + continue + } + if mnt["mountpoint"] == "/" { + return findRootDatastoreSpec(mnt) + } + } + // No root mount found; return nil so callers fall back gracefully + // (in-memory datastore or skip mounting) rather than passing a + // mount-type spec to openDatastoreAt which expects a leaf backend. 
+ return nil + case "measure", "log": + if child, ok := spec["child"].(map[string]any); ok { + return findRootDatastoreSpec(child) + } + return spec + default: + if _, hasChild := spec["child"]; hasChild { + logger.Warnw("unrecognized datastore wrapper type, using as-is", + "type", spec["type"]) + } + return spec + } +} + +// MountKeystoreDatastores opens any provider keystore datastores that exist on +// disk and returns them as mount.Mount entries ready to be combined with the +// main repo datastore. The caller must call the returned cleanup function when +// done. Returns nil mounts and a no-op closer if no keystores exist. +func MountKeystoreDatastores(repo repo.Repo) ([]mount.Mount, func(), error) { + cfg, err := repo.Config() + if err != nil { + return nil, nil, fmt.Errorf("reading repo config: %w", err) + } + + rootSpec := findRootDatastoreSpec(cfg.Datastore.Spec) + if rootSpec == nil { + return nil, func() {}, nil + } + + keystoreBasePath := filepath.Join(repo.Path(), KeystoreDatastorePath) + var mounts []mount.Mount + var closers []func() + + for _, suffix := range []string{"0", "1"} { + dir := filepath.Join(keystoreBasePath, suffix) + if _, err := os.Stat(dir); err != nil { + continue + } + ds, err := openDatastoreAt(rootSpec, dir) + if err != nil { + for _, c := range closers { + c() + } + return nil, nil, err + } + prefix := providerDatastoreKey.Child(keystoreDatastoreKey).ChildString(suffix) + mounts = append(mounts, mount.Mount{Prefix: prefix, Datastore: ds}) + closers = append(closers, func() { ds.Close() }) + } + + closer := func() { + for _, c := range closers { + c() + } + } + return mounts, closer, nil +} + +// openDatastoreAt opens a datastore using the given spec at the specified path. +// It deep-copies the spec to avoid mutating the original. 
+func openDatastoreAt(rootSpec map[string]any, path string) (datastore.Batching, error) { + spec := copySpec(rootSpec) + spec["path"] = path + dsc, err := fsrepo.AnyDatastoreConfig(spec) + if err != nil { + return nil, fmt.Errorf("creating datastore config for %s: %w", path, err) + } + return dsc.Create("") +} + +// copySpec deep-copies a datastore spec map so modifications (e.g., changing +// the path) don't affect the original. +func copySpec(spec map[string]any) map[string]any { + if spec == nil { + return nil + } + cp := make(map[string]any, len(spec)) + for k, v := range spec { + switch val := v.(type) { + case map[string]any: + cp[k] = copySpec(val) + case []any: + s := make([]any, len(val)) + for i, elem := range val { + if m, ok := elem.(map[string]any); ok { + s[i] = copySpec(m) + } else { + s[i] = elem + } + } + cp[k] = s + default: + cp[k] = v + } + } + return cp +} + +// purgeBatchSize is the number of keys deleted per batch commit during +// orphaned keystore cleanup. Each commit is a cancellation checkpoint. +const purgeBatchSize = 1 << 12 // 4096 + +// purgeOrphanedKeystoreData deletes all keys under /provider/keystore/ from the +// shared repo datastore. These were written by older Kubo versions that stored +// provider keystore data inline in the shared datastore. The new code uses +// separate filesystem datastores under /{KeystoreDatastorePath}/ instead. +// +// The operation is idempotent and safe to interrupt: partial completion is +// fine because already-deleted keys are no-ops on re-run. 
+func purgeOrphanedKeystoreData(ctx context.Context, ds datastore.Batching) error { + orphanedPrefix := providerDatastoreKey.Child(keystoreDatastoreKey).String() + syncKey := datastore.NewKey(orphanedPrefix) + + results, err := ds.Query(ctx, query.Query{ + Prefix: orphanedPrefix, + KeysOnly: true, + }) + if err != nil { + return fmt.Errorf("querying orphaned keystore data: %w", err) + } + defer results.Close() + + var batch datastore.Batch + var count, pending int + for result := range results.Next() { + if ctx.Err() != nil { + return ctx.Err() + } + if result.Error != nil { + return fmt.Errorf("iterating orphaned keystore data: %w", result.Error) + } + if batch == nil { + batch, err = ds.Batch(ctx) + if err != nil { + return fmt.Errorf("creating batch for orphaned keystore cleanup: %w", err) + } + } + if err := batch.Delete(ctx, datastore.NewKey(result.Key)); err != nil { + return fmt.Errorf("batch deleting orphaned key %s: %w", result.Key, err) + } + count++ + pending++ + if pending >= purgeBatchSize { + if err := batch.Commit(ctx); err != nil { + return fmt.Errorf("committing orphaned keystore cleanup batch: %w", err) + } + if err := ds.Sync(ctx, syncKey); err != nil { + return fmt.Errorf("syncing orphaned keystore cleanup: %w", err) + } + batch = nil + pending = 0 + } + } + if pending > 0 { + if err := batch.Commit(ctx); err != nil { + return fmt.Errorf("committing orphaned keystore cleanup batch: %w", err) + } + if err := ds.Sync(ctx, syncKey); err != nil { + return fmt.Errorf("syncing orphaned keystore cleanup: %w", err) + } + } + if count > 0 { + logger.Infow("purged orphaned provider keystore data from shared datastore", "keys", count) + } + return nil +} + func SweepingProviderOpt(cfg *config.Config) fx.Option { reprovideInterval := cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval) type providerInput struct { fx.In DHT routing.Routing `name:"dhtc"` Repo repo.Repo + Lc fx.Lifecycle } sweepingReprovider := fx.Provide(func(in 
providerInput) (DHTProvider, *keystore.ResettableKeystore, error) { - ds := namespace.Wrap(in.Repo.Datastore(), datastore.NewKey(providerDatastorePrefix)) - ks, err := keystore.NewResettableKeystore(ds, - keystore.WithPrefixBits(16), - keystore.WithDatastorePath(keystoreDatastorePath), - keystore.WithBatchSize(int(cfg.Provide.DHT.KeystoreBatchSize.WithDefault(config.DefaultProvideDHTKeystoreBatchSize))), + ds := namespace.Wrap(in.Repo.Datastore(), providerDatastoreKey) + + // Get repo path and config to determine datastore type + repoPath := in.Repo.Path() + repoCfg, err := in.Repo.Config() + if err != nil { + return nil, nil, fmt.Errorf("getting repo config: %w", err) + } + + // Find the root datastore type (levelds, pebbleds, etc.) + rootSpec := findRootDatastoreSpec(repoCfg.Datastore.Spec) + + // Keystore datastores live at /{KeystoreDatastorePath}/ + keystoreBasePath := filepath.Join(repoPath, KeystoreDatastorePath) + + createDs := func(suffix string) (datastore.Batching, error) { + if err := validateKeystoreSuffix(suffix); err != nil { + return nil, err + } + // When no datastore spec is configured (e.g., test/mock repos), + // fall back to an in-memory datastore. 
+ if rootSpec == nil { + return datastore.NewMapDatastore(), nil + } + if err := os.MkdirAll(keystoreBasePath, 0o755); err != nil { + return nil, fmt.Errorf("creating keystore base directory: %w", err) + } + ds, err := openDatastoreAt(rootSpec, filepath.Join(keystoreBasePath, suffix)) + if err != nil { + return nil, err + } + logger.Infow("provider keystore: opened datastore", "suffix", suffix, "path", filepath.Join(keystoreBasePath, suffix)) + return ds, nil + } + + destroyDs := func(suffix string) error { + if err := validateKeystoreSuffix(suffix); err != nil { + return err + } + logger.Infow("provider keystore: removing datastore from disk", "suffix", suffix, "path", filepath.Join(keystoreBasePath, suffix)) + return os.RemoveAll(filepath.Join(keystoreBasePath, suffix)) + } + + // One-time cleanup of stale keystore data left by older Kubo in the + // shared repo datastore under /provider/keystore/. New code stores + // bulk key data in separate filesystem datastores under + // /{KeystoreDatastorePath}/ while still using the same + // /provider/keystore/ namespace in the shared datastore for metadata. + // + // The absence of the keystoreBasePath directory signals a first run + // after upgrade: the directory is created later by createDs on first + // use, so it doubles as a "cleanup done" flag. If the process dies + // mid-purge the directory still won't exist and the cleanup re-runs + // on next start (it is idempotent). Must run synchronously before + // NewResettableKeystore to avoid racing with reads on the same + // namespace. + if _, statErr := os.Stat(keystoreBasePath); os.IsNotExist(statErr) { + logger.Infow("migrating provider keystore data from shared datastore to separate filesystem datastores", "path", keystoreBasePath) + // Create a cancellable context for the purge. The OnStop hook + // below calls purgeCancel when the node receives a shutdown + // signal (e.g., SIGINT), which interrupts the purge loop + // instead of blocking indefinitely. 
+ purgeCtx, purgeCancel := context.WithCancel(context.Background()) + in.Lc.Append(fx.Hook{ + OnStop: func(_ context.Context) error { + purgeCancel() + return nil + }, + }) + if purgeErr := purgeOrphanedKeystoreData(purgeCtx, in.Repo.Datastore()); purgeErr != nil { + if purgeCtx.Err() != nil { + logger.Infow("provider keystore migration interrupted by shutdown, will resume on next start") + } else { + logger.Warnw("provider keystore migration failed, will retry on next start", "error", purgeErr) + } + } else { + logger.Infow("provider keystore migration completed") + } + purgeCancel() + } + + keystoreDs := namespace.Wrap(ds, keystoreDatastoreKey) + ks, err := keystore.NewResettableKeystore(keystoreDs, + keystore.WithDatastoreFactory(createDs, destroyDs), + keystore.KeystoreOption( + keystore.WithPrefixBits(16), + keystore.WithBatchSize(int(cfg.Provide.DHT.KeystoreBatchSize.WithDefault(config.DefaultProvideDHTKeystoreBatchSize))), + ), ) if err != nil { return nil, nil, err @@ -796,13 +1101,14 @@ func OnlineProviders(provide bool, cfg *config.Config) fx.Option { providerStrategy := cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy) - strategyFlag := config.ParseProvideStrategy(providerStrategy) - if strategyFlag == 0 { - return fx.Error(fmt.Errorf("provider: unknown strategy %q", providerStrategy)) + if _, err := config.ParseProvideStrategy(providerStrategy); err != nil { + return fx.Error(fmt.Errorf("provider: %w", err)) } + bloomFPRate := uint(cfg.Provide.BloomFPRate.WithDefault(config.DefaultProvideBloomFPRate)) + opts := []fx.Option{ - fx.Provide(setReproviderKeyProvider(providerStrategy)), + fx.Provide(setReproviderKeyProvider(providerStrategy, bloomFPRate)), } sweepEnabled := cfg.Provide.DHT.SweepEnabled.WithDefault(config.DefaultProvideDHTSweepEnabled) @@ -864,13 +1170,188 @@ type provStrategyOut struct { ProvidingKeyChanFunc provider.KeyChanFunc } +// readLastUniqueCount reads the persisted unique CID count from the +// previous +unique 
reprovide cycle. Returns 0 if not found or corrupt. +func readLastUniqueCount(ds datastore.Datastore) uint64 { + val, err := ds.Get(context.Background(), datastore.NewKey(reprovideLastUniqueCountKey)) + if err != nil { + return 0 + } + if len(val) != 8 { + return 0 + } + return binary.BigEndian.Uint64(val) +} + +// persistUniqueCount stores the unique CID count for the next cycle. +func persistUniqueCount(ds datastore.Datastore, count uint64) { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, count) + if err := ds.Put(context.Background(), datastore.NewKey(reprovideLastUniqueCountKey), buf); err != nil { + logger.Errorf("failed to persist unique count: %s", err) + } +} + +// walkFunc abstracts a DAG walk (WalkDAG or WalkEntityRoots) so the +// MFS provider can be parameterized without duplicating the +// flush+walk+channel boilerplate. +type walkFunc func(ctx context.Context, root cid.Cid, emit func(cid.Cid) bool, opts ...walker.Option) error + +// uniqueMFSProvider is the +unique counterpart of mfsProvider. It +// flushes the MFS root, then walks the MFS DAG with a shared +// VisitedTracker and a locality check (blockstore.Has) so only +// locally-present blocks are emitted. +func uniqueMFSProvider(mfsRoot *mfs.Root, bs blockstore.Blockstore, tracker walker.VisitedTracker) provider.KeyChanFunc { + walk := func(ctx context.Context, root cid.Cid, emit func(cid.Cid) bool, opts ...walker.Option) error { + return walker.WalkDAG(ctx, root, walker.LinksFetcherFromBlockstore(bs), emit, opts...) + } + return mfsWalkProvider(mfsRoot, bs, tracker, walk) +} + +// mfsEntityRootsProvider is the +entities counterpart. It walks with +// WalkEntityRoots, emitting only entity roots and skipping file chunks. 
+func mfsEntityRootsProvider(mfsRoot *mfs.Root, bs blockstore.Blockstore, tracker walker.VisitedTracker) provider.KeyChanFunc { + walk := func(ctx context.Context, root cid.Cid, emit func(cid.Cid) bool, opts ...walker.Option) error { + return walker.WalkEntityRoots(ctx, root, walker.NodeFetcherFromBlockstore(bs), emit, opts...) + } + return mfsWalkProvider(mfsRoot, bs, tracker, walk) +} + +// mfsWalkProvider builds a KeyChanFunc that flushes MFS, then walks +// with the given walkFunc using a shared tracker and locality check. +func mfsWalkProvider(mfsRoot *mfs.Root, bs blockstore.Blockstore, tracker walker.VisitedTracker, walk walkFunc) provider.KeyChanFunc { + return func(ctx context.Context) (<-chan cid.Cid, error) { + if err := mfsRoot.FlushMemFree(ctx); err != nil { + return nil, fmt.Errorf("provider: error flushing MFS: %w", err) + } + rootNode, err := mfsRoot.GetDirectory().GetNode() + if err != nil { + return nil, fmt.Errorf("provider: error loading MFS root: %w", err) + } + + ch := make(chan cid.Cid) + go func() { + defer close(ch) + locality := func(ctx context.Context, c cid.Cid) (bool, error) { + return bs.Has(ctx, c) + } + _ = walk(ctx, rootNode.Cid(), func(c cid.Cid) bool { + select { + case ch <- c: + return true + case <-ctx.Done(): + return false + } + }, walker.WithVisitedTracker(tracker), walker.WithLocality(locality)) + }() + return ch, nil + } +} + // createKeyProvider creates the appropriate KeyChanFunc based on strategy. -// Each strategy has different behavior: -// - "roots": Only root CIDs of pinned content -// - "pinned": All pinned content (roots + children) -// - "mfs": Only MFS content -// - "all": all blocks -func createKeyProvider(strategyFlag config.ProvideStrategy, in provStrategyIn) provider.KeyChanFunc { +// fpRate is the bloom filter target false-positive rate (1/N) used by +// +unique and +entities cycles. Ignored by other strategies. 
+func createKeyProvider(strategyFlag config.ProvideStrategy, fpRate uint, in provStrategyIn) provider.KeyChanFunc { + // +unique modifier: use bloom filter cross-DAG dedup + useUnique := strategyFlag&config.ProvideStrategyUnique != 0 + if useUnique { + basePinned := strategyFlag&config.ProvideStrategyPinned != 0 + baseMFS := strategyFlag&config.ProvideStrategyMFS != 0 + ds := in.Repo.Datastore() + + // return a KeyChanFunc that creates a fresh bloom each cycle + return func(ctx context.Context) (<-chan cid.Cid, error) { + count := readLastUniqueCount(ds) + // size the bloom from the previous cycle's count (with growth + // margin for repo changes between cycles), falling back to + // DefaultBloomInitialCapacity on the very first cycle. The + // bloom chain auto-grows if the repo exceeds this estimate. + expectedItems := max( + uint64(walker.DefaultBloomInitialCapacity), + uint64(float64(count)*walker.BloomGrowthMargin), + ) + // the tracker is shared across all sub-walks (MFS, recursive + // pins, direct pins) within a single reprovide cycle. it + // detects duplicate sub-DAG branches across recursive pins + // that share content (e.g. append-only datasets where each + // version differs by a small delta). when a CID is already + // in the bloom, its entire subtree is skipped, reducing + // traversal from O(pins * total_blocks) to O(unique_blocks). + tracker, err := walker.NewBloomTracker(uint(expectedItems), fpRate) + if err != nil { + return nil, fmt.Errorf("bloom tracker: %w", err) + } + + useEntities := strategyFlag&config.ProvideStrategyEntities != 0 + + // select provider functions based on +entities modifier: + // +entities uses WalkEntityRoots (skips file chunks), + // +unique without +entities uses WalkDAG (all blocks). 
+ makePinProv := dspinner.NewUniquePinnedProvider + makeMFSProv := uniqueMFSProvider + if useEntities { + makePinProv = dspinner.NewPinnedEntityRootsProvider + makeMFSProv = mfsEntityRootsProvider + } + + var inner provider.KeyChanFunc + switch { + case basePinned && baseMFS: + // MFS first: walk MFS (locality-filtered), then pinned. + // NewConcatProvider (not NewPrioritizedProvider) because + // the shared bloom tracker already guarantees each CID + // is emitted at most once -- no need for a second dedup + // layer. NewBufferedProvider decouples the pinned + // provider so the pinner lock is released promptly. + inner = provider.NewConcatProvider( + makeMFSProv(in.MFSRoot, in.Blockstore, tracker), + provider.NewBufferedProvider( + makePinProv(in.Pinner, in.Blockstore, tracker)), + ) + case basePinned: + inner = provider.NewBufferedProvider( + makePinProv(in.Pinner, in.Blockstore, tracker)) + case baseMFS: + inner = makeMFSProv(in.MFSRoot, in.Blockstore, tracker) + default: + return nil, fmt.Errorf("provider: +unique requires pinned and/or mfs") + } + + // wrap inner channel to persist bloom count on successful close + innerCh, err := inner(ctx) + if err != nil { + return nil, err + } + + ch := make(chan cid.Cid) + go func() { + defer func() { + if ctx.Err() == nil { + persistUniqueCount(ds, tracker.Count()) + } + logger.Infow("unique reprovide cycle finished", + "providedCIDs", tracker.Count(), + "skippedBranches", tracker.Deduplicated()) + close(ch) + }() + for c := range innerCh { + select { + case ch <- c: + case <-ctx.Done(): + return + } + } + }() + + logger.Infow("unique reprovide cycle started", + "expectedItems", expectedItems, + "previousCount", count, + ) + return ch, nil + } + } + + // non-unique strategies (unchanged) switch strategyFlag { case config.ProvideStrategyRoots: return provider.NewBufferedProvider(dspinner.NewPinnedProvider(true, in.Pinner, in.OfflineIPLDFetcher)) @@ -941,12 +1422,12 @@ func handleStrategyChange(strategy string, provider 
DHTProvider, ds datastore.Da } } -func setReproviderKeyProvider(strategy string) func(in provStrategyIn) provStrategyOut { - strategyFlag := config.ParseProvideStrategy(strategy) +func setReproviderKeyProvider(strategy string, fpRate uint) func(in provStrategyIn) provStrategyOut { + strategyFlag := config.MustParseProvideStrategy(strategy) return func(in provStrategyIn) provStrategyOut { // Create the appropriate key provider based on strategy - kcf := createKeyProvider(strategyFlag, in) + kcf := createKeyProvider(strategyFlag, fpRate, in) return provStrategyOut{ ProvidingStrategy: strategyFlag, ProvidingKeyChanFunc: kcf, diff --git a/core/node/provider_test.go b/core/node/provider_test.go new file mode 100644 index 00000000000..0a7e6c0ef15 --- /dev/null +++ b/core/node/provider_test.go @@ -0,0 +1,91 @@ +package node + +import ( + "context" + "math" + "testing" + + "github.com/ipfs/go-datastore" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// newTestDatastore returns a fresh in-memory datastore for unique-count +// persistence tests. Tests are single-goroutine so no sync wrapper is +// needed. +func newTestDatastore() datastore.Datastore { + return datastore.NewMapDatastore() +} + +func TestReadLastUniqueCount_emptyReturnsZero(t *testing.T) { + ds := newTestDatastore() + + // A fresh datastore has no persisted count. The reader treats this + // as "no previous cycle data available" and returns 0, which the + // caller falls back to DefaultBloomInitialCapacity for. 
+ got := readLastUniqueCount(ds) + assert.Equal(t, uint64(0), got) +} + +func TestPersistAndReadUniqueCount_roundTrip(t *testing.T) { + tests := []struct { + name string + count uint64 + }{ + {"zero", 0}, + {"one", 1}, + {"small", 1_000}, + {"million", 1_000_000}, + {"billion", 1_000_000_000}, + {"max uint64", math.MaxUint64}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ds := newTestDatastore() + persistUniqueCount(ds, tt.count) + got := readLastUniqueCount(ds) + assert.Equal(t, tt.count, got) + }) + } +} + +func TestPersistUniqueCount_overwriteReplacesPreviousValue(t *testing.T) { + ds := newTestDatastore() + + // Each reprovide cycle persists a new count, overwriting the + // previous one. The reader must return the most recent value. + persistUniqueCount(ds, 1_000) + persistUniqueCount(ds, 2_000_000) + persistUniqueCount(ds, 42) + + got := readLastUniqueCount(ds) + assert.Equal(t, uint64(42), got) +} + +func TestReadLastUniqueCount_corruptLengthReturnsZero(t *testing.T) { + tests := []struct { + name string + raw []byte + }{ + {"empty bytes", []byte{}}, + {"too short (4 bytes)", []byte{0x01, 0x02, 0x03, 0x04}}, + {"too long (16 bytes)", make([]byte, 16)}, + {"single byte", []byte{0xFF}}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ds := newTestDatastore() + // Write malformed bytes directly under the persistence key + // to simulate a corrupt or truncated entry. + err := ds.Put(context.Background(), datastore.NewKey(reprovideLastUniqueCountKey), tt.raw) + require.NoError(t, err) + + // The reader rejects anything that is not exactly 8 bytes + // and falls back to 0 instead of panicking on a short read. 
+ got := readLastUniqueCount(ds) + assert.Equal(t, uint64(0), got) + }) + } +} diff --git a/core/node/storage.go b/core/node/storage.go index e97a0db4ab9..6308e77a952 100644 --- a/core/node/storage.go +++ b/core/node/storage.go @@ -7,6 +7,7 @@ import ( "go.uber.org/fx" "github.com/ipfs/boxo/filestore" + "github.com/ipfs/boxo/provider" "github.com/ipfs/kubo/core/node/helpers" "github.com/ipfs/kubo/repo" "github.com/ipfs/kubo/thirdparty/verifbs" @@ -41,7 +42,7 @@ func BaseBlockstoreCtor( // Important: Provide calls from blockstore are intentionally BLOCKING. // The Provider implementation (not the blockstore) should handle concurrency/queuing. // This avoids spawning unbounded goroutines for concurrent block additions. - strategyFlag := config.ParseProvideStrategy(providingStrategy) + strategyFlag := config.MustParseProvideStrategy(providingStrategy) if strategyFlag&config.ProvideStrategyAll != 0 { opts = append(opts, blockstore.Provider(prov)) } @@ -77,14 +78,25 @@ func GcBlockstoreCtor(bb BaseBlocks) (gclocker blockstore.GCLocker, gcbs blockst } // FilestoreBlockstoreCtor wraps GcBlockstore and adds Filestore support -func FilestoreBlockstoreCtor(repo repo.Repo, bb BaseBlocks, prov DHTProvider) (gclocker blockstore.GCLocker, gcbs blockstore.GCBlockstore, bs blockstore.Blockstore, fstore *filestore.Filestore) { - gclocker = blockstore.NewGCLocker() +func FilestoreBlockstoreCtor( + providingStrategy string, +) func(repo repo.Repo, bb BaseBlocks, prov DHTProvider) (gclocker blockstore.GCLocker, gcbs blockstore.GCBlockstore, bs blockstore.Blockstore, fstore *filestore.Filestore) { + return func(repo repo.Repo, bb BaseBlocks, prov DHTProvider) (gclocker blockstore.GCLocker, gcbs blockstore.GCBlockstore, bs blockstore.Blockstore, fstore *filestore.Filestore) { + gclocker = blockstore.NewGCLocker() - // hash security - fstore = filestore.NewFilestore(bb, repo.FileManager(), prov) - gcbs = blockstore.NewGCBlockstore(fstore, gclocker) - gcbs = 
&verifbs.VerifBSGC{GCBlockstore: gcbs} + var fstoreProv provider.MultihashProvider + strategyFlag := config.MustParseProvideStrategy(providingStrategy) + if strategyFlag&config.ProvideStrategyAll != 0 { + fstoreProv = prov + } - bs = gcbs - return + fstore = filestore.NewFilestore(bb, repo.FileManager(), fstoreProv) + + // hash security + gcbs = blockstore.NewGCBlockstore(fstore, gclocker) + gcbs = &verifbs.VerifBSGC{GCBlockstore: gcbs} + + bs = gcbs + return + } } diff --git a/coverage/main/main.go b/coverage/main/main.go index 0d279d967e3..3a897eadab9 100644 --- a/coverage/main/main.go +++ b/coverage/main/main.go @@ -1,3 +1,4 @@ +// Only built when collecting coverage via "go test -tags testrunmain". //go:build testrunmain package main diff --git a/docs/README.md b/docs/README.md index a3777546de0..348e9d7359b 100644 --- a/docs/README.md +++ b/docs/README.md @@ -29,6 +29,7 @@ If you're experiencing an issue with IPFS, please [file an issue](https://github ## Development - **[Developer Guide](developer-guide.md)** - prerequisites, build, test, and contribute +- **[AGENTS.md](../AGENTS.md)** - instructions for AI coding agents - Contributing Guidelines [for IPFS projects](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md) and for [Go code specifically](https://github.com/ipfs/community/blob/master/CONTRIBUTING_GO.md) - [Building on Windows](windows.md) - [Customizing Kubo](customizing.md) diff --git a/docs/RELEASE_CHECKLIST.md b/docs/RELEASE_CHECKLIST.md index da96a20d485..70034debc7a 100644 --- a/docs/RELEASE_CHECKLIST.md +++ b/docs/RELEASE_CHECKLIST.md @@ -1,4 +1,4 @@ - + # ✅ Release Checklist (vX.Y.Z[-rcN]) @@ -21,32 +21,33 @@ - [ ] Create `./docs/changelogs/vX.Y+1.md` and add link in [CHANGELOG.md](https://github.com/ipfs/kubo/blob/master/CHANGELOG.md) - [ ] Switch to `release-vX.Y.Z` branch and update [version.go](https://github.com/ipfs/kubo/blob/master/version.go) to `vX.Y.Z(-rcN)` (⚠️ double-check Y matches release) 
([example](https://github.com/ipfs/kubo/pull/9394)) - [ ] Create draft PR: `release-vX.Y.Z` → `release` ([example](https://github.com/ipfs/kubo/pull/9306)) -- [ ] In `release-vX.Y.Z` branch, cherry-pick commits from `master`: `git cherry-pick -x ` ([example](https://github.com/ipfs/kubo/pull/10636/commits/033de22e3bc6191dbb024ad6472f5b96b34e3ccf)) - - ⚠️ **NOTE:** `-x` flag records original commit SHA for traceability and ensures cleaner merges with deduplicated commits in history +- [ ] Cherry-pick commits from `master` into `release-vX.Y.Z`: `git cherry-pick -x ` ([example](https://github.com/ipfs/kubo/pull/10636/commits/033de22e3bc6191dbb024ad6472f5b96b34e3ccf)) + - ⚠️ **NOTE:** `-x` flag records original commit SHA for traceability and cleaner merge history - [ ] Verify all CI checks on the PR are passing -- [ ] **FINAL only:** In `release-vX.Y.Z` branch, replace `Changelog` and `Contributors` sections with `./bin/mkreleaselog` stdout (do **NOT** copy stderr) +- [ ] **FINAL only:** Replace `Changelog` and `Contributors` sections in `release-vX.Y.Z` with `./bin/mkreleaselog` stdout (do **NOT** copy stderr) - [ ] **FINAL only:** Merge PR (`release-vX.Y.Z` → `release`) using `Create a merge commit` - - ⚠️ do **NOT** use `Squash and merge` nor `Rebase and merge` because we need to be able to sign the merge commit + - ⚠️ do **NOT** use `Squash and merge` nor `Rebase and merge` -- we want the releaser's GPG signature on the merge commit - ⚠️ do **NOT** delete the `release-vX.Y.Z` branch (needed for future patch releases and git history) ## 2. Tag & Publish ### Create Tag -⚠️ **POINT OF NO RETURN:** Once pushed, tags trigger automatic Docker/NPM publishing that cannot be reversed! +⚠️ **POINT OF NO RETURN:** Once pushed, tags trigger automatic Docker/NPM publishing and are irreversible! If you're making a release for the first time, do pair programming and have the release reviewer verify all commands. 
- [ ] **RC:** From `release-vX.Y.Z` branch: `git tag -s vX.Y.Z-rcN -m 'Prerelease X.Y.Z-rcN'` - [ ] **FINAL:** After PR merge, from `release` branch: `git tag -s vX.Y.Z -m 'Release X.Y.Z'` - [ ] ⚠️ Verify tag is signed and correct: `git show vX.Y.Z(-rcN)` - [ ] Push tag: `git push origin vX.Y.Z(-rcN)` - - ⚠️ do **NOT** use `git push --tags` because it pushes all your local tags + - ⚠️ do **NOT** use `git push --tags` (pushes all local tags, polluting the repo with noise) - [ ] **STOP:** Wait for [Docker build](https://github.com/ipfs/kubo/actions/workflows/docker-image.yml) to complete before proceeding ### Publish Artifacts -- [ ] **Docker:** Publish to [DockerHub](https://hub.docker.com/r/ipfs/kubo/tags) - - [ ] Wait for [Publish docker image](https://github.com/ipfs/kubo/actions/workflows/docker-image.yml) workflow triggered by tag push - - [ ] Verify image is available on [Docker Hub → tags](https://hub.docker.com/r/ipfs/kubo/tags) +> **Parallelism:** Docker and dist.ipfs.tech only depend on the pushed tag and can be started in parallel. +> NPM and GitHub Release both depend on dist.ipfs.tech completing first. 
+ +- [ ] **Docker:** Verify [docker-image CI](https://github.com/ipfs/kubo/actions/workflows/docker-image.yml) passed and image is available on [Docker Hub → tags](https://hub.docker.com/r/ipfs/kubo/tags) - [ ] **dist.ipfs.tech:** Publish to [dist.ipfs.tech](https://dist.ipfs.tech) - [ ] Check out [ipfs/distributions](https://github.com/ipfs/distributions) - [ ] Create branch: `git checkout -b release-kubo-X.Y.Z(-rcN)` @@ -64,7 +65,7 @@ If you're making a release for the first time, do pair programming and have the - [ ] Link to release issue - [ ] **RC:** Link to changelog, check `This is a pre-release` - [ ] **FINAL:** Copy changelog content (without header), do **NOT** check pre-release - - [ ] Run [sync-release-assets](https://github.com/ipfs/kubo/actions/workflows/sync-release-assets.yml) workflow + - [ ] Run [sync-release-assets](https://github.com/ipfs/kubo/actions/workflows/sync-release-assets.yml) workflow (requires dist.ipfs.tech) - [ ] Verify assets are attached to the GitHub release ## 3. 
Post-Release @@ -74,18 +75,18 @@ If you're making a release for the first time, do pair programming and have the - [ ] **FINAL only:** Merge `release` → `master` - [ ] Create branch `merge-release-vX.Y.Z` from `release` - [ ] Merge `master` to `merge-release-vX.Y.Z` first, and resolve conflict in `version.go` - - ⚠️ **NOTE:** make sure to ignore the changes to [version.go](https://github.com/ipfs/kubo/blob/master/version.go) (keep the `-dev` in `master`) + - ⚠️ **NOTE:** keep the `-dev` version from `master` in [version.go](https://github.com/ipfs/kubo/blob/master/version.go), discard version from `release` - [ ] Create and merge PR from `merge-release-vX.Y.Z` to `master` using `Create a merge commit` - - ⚠️ do **NOT** use `Squash and merge` nor `Rebase and merge` because we want to preserve original commit history + - ⚠️ do **NOT** use `Squash and merge` nor `Rebase and merge` -- only `Create a merge commit` preserves commit history and audit trail of what was merged where - [ ] Update [ipshipyard/waterworks-infra](https://github.com/ipshipyard/waterworks-infra) - [ ] Update Kubo staging environment ([Running Kubo tests on staging](https://www.notion.so/Running-Kubo-tests-on-staging-488578bb46154f9bad982e4205621af8)) - [ ] **RC:** Test last release against current RC - [ ] **FINAL:** Latest release on both boxes - [ ] **FINAL:** Update collab cluster boxes to the tagged release - [ ] **FINAL:** Update libp2p bootstrappers to the tagged release -- [ ] Smoke test with [IPFS Companion Browser Extension](https://docs.ipfs.tech/install/ipfs-companion/) - [ ] Update [ipfs-desktop](https://github.com/ipfs/ipfs-desktop) - [ ] Create PR updating kubo version in `package.json` and `package-lock.json` + - [ ] Smoke test with [IPFS Companion Browser Extension](https://docs.ipfs.tech/install/ipfs-companion/) against the PR build - [ ] **FINAL:** Merge PR and ship new ipfs-desktop release - [ ] **FINAL only:** Update [docs.ipfs.tech](https://docs.ipfs.tech/): run 
[update-on-new-ipfs-tag.yml](https://github.com/ipfs/ipfs-docs/actions/workflows/update-on-new-ipfs-tag.yml) workflow and merge the PR diff --git a/docs/changelogs/v0.40.md b/docs/changelogs/v0.40.md index 1750223504c..1ed451425f4 100644 --- a/docs/changelogs/v0.40.md +++ b/docs/changelogs/v0.40.md @@ -49,7 +49,7 @@ This release brings reproducible file imports (CID Profiles), automatic cleanup #### 🔢 IPIP-499: UnixFS CID Profiles -CID Profiles are presets that pin down how files get split into blocks and organized into directories, so you get the same CID for the same data across different software or versions. Defined in [IPIP-499](https://github.com/ipfs/specs/pull/499). +CID Profiles are presets that pin down how files get split into blocks and organized into directories, so you get the same CID for the same data across different software or versions. Defined in [IPIP-499](https://specs.ipfs.tech/ipips/ipip-0499/). **New configuration [profiles](https://github.com/ipfs/kubo/blob/master/docs/config.md#profiles)** @@ -94,7 +94,7 @@ Under the hood, the block storage layer (flatfs) was rewritten to use atomic bat #### 🌍 Light clients can now use your node for delegated routing -The [Routing V1 HTTP API](https://specs.ipfs.tech/routing/http-routing-v1/) is now exposed by default at `http://127.0.0.1:8080/routing/v1`. This allows light clients in browsers to use Kubo Gateway as a delegated routing backend instead of running a full DHT client. Support for [IPIP-476: Delegated Routing DHT Closest Peers API](https://github.com/ipfs/specs/pull/476) is included. Can be disabled via [`Gateway.ExposeRoutingAPI`](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewayexposeroutingapi). +The [Routing V1 HTTP API](https://specs.ipfs.tech/routing/http-routing-v1/) is now exposed by default at `http://127.0.0.1:8080/routing/v1`. This allows light clients in browsers to use Kubo Gateway as a delegated routing backend instead of running a full DHT client. 
Support for [IPIP-476: Delegated Routing DHT Closest Peers API](https://specs.ipfs.tech/ipips/ipip-0476/) is included. Can be disabled via [`Gateway.ExposeRoutingAPI`](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewayexposeroutingapi). #### 📊 See total size when pinning @@ -108,13 +108,13 @@ Fetched/Processed 336 nodes (83 MB) #### 🔀 IPIP-523: `?format=` takes precedence over `Accept` header -The `?format=` URL query parameter now always wins over the `Accept` header ([IPIP-523](https://github.com/ipfs/specs/pull/523)), giving you deterministic HTTP caching and protecting against CDN cache-key collisions. Browsers can also use `?format=` reliably even when they send `Accept` headers with specific content types. +The `?format=` URL query parameter now always wins over the `Accept` header ([IPIP-523](https://specs.ipfs.tech/ipips/ipip-0523/)), giving you deterministic HTTP caching and protecting against CDN cache-key collisions. Browsers can also use `?format=` reliably even when they send `Accept` headers with specific content types. The only breaking change is for edge cases where a client sends both a specific `Accept` header and a different `?format=` value for an explicitly supported format (`tar`, `raw`, `car`, `dag-json`, `dag-cbor`, etc.). Previously `Accept` would win. Now `?format=` always wins. #### 🚫 IPIP-524: Gateway codec conversion disabled by default -Gateways no longer convert between codecs by default ([IPIP-524](https://github.com/ipfs/specs/pull/524)). This removes gateways from a gatekeeping role: clients can adopt new codecs immediately without waiting for gateway operator updates. Requests for a format that differs from the block's codec now return `406 Not Acceptable`. +Gateways no longer convert between codecs by default ([IPIP-524](https://specs.ipfs.tech/ipips/ipip-0524/)). This removes gateways from a gatekeeping role: clients can adopt new codecs immediately without waiting for gateway operator updates. 
Requests for a format that differs from the block's codec now return `406 Not Acceptable`. **Migration**: Clients should fetch raw blocks (`?format=raw` or `Accept: application/vnd.ipld.raw`) and convert client-side using libraries like [@helia/verified-fetch](https://www.npmjs.com/package/@helia/verified-fetch). diff --git a/docs/changelogs/v0.41.md b/docs/changelogs/v0.41.md new file mode 100644 index 00000000000..e943afd7312 --- /dev/null +++ b/docs/changelogs/v0.41.md @@ -0,0 +1,227 @@ +# Kubo changelog v0.41 + + + +This release was brought to you by the [Shipyard](https://ipshipyard.com/) team. + +- [v0.41.0](#v0410) + +## v0.41.0 + +- [Overview](#overview) +- [🔦 Highlights](#-highlights) + - [🗑️ Faster Provide Queue Disk Reclamation](#-faster-provide-queue-disk-reclamation) + - [✨ New `ipfs cid inspect` command](#-new-ipfs-cid-inspect-command) + - [🔤 `--cid-base` fixes across all commands](#-cid-base-fixes-across-all-commands) + - [🔄 Built-in `ipfs update` command](#-built-in-ipfs-update-command) + - [🖥️ WebUI Improvements](#-webui-improvements) + - [🔧 Correct provider addresses for custom HTTP routing](#-correct-provider-addresses-for-custom-http-routing) + - [🔀 `Provide.Strategy` modifiers: `+unique` and `+entities`](#-providestrategy-modifiers-unique-and-entities) + - [📌 `pin add` and `pin update` now fast-provide root CID](#-pin-add-and-pin-update-now-fast-provide-root-cid) + - [🌳 New `--fast-provide-dag` flag for fine-tuned provide control](#-new---fast-provide-dag-flag-for-fine-tuned-provide-control) + - [🛡️ Hardened `Provide.Strategy` parsing](#-hardened-providestrategy-parsing) + - [🔧 Filestore now respects `Provide.Strategy`](#-filestore-now-respects-providestrategy) + - [🛡️ `ipfs object patch` validates UnixFS node types](#-ipfs-object-patch-validates-unixfs-node-types) + - [🔗 MFS: fixed CidBuilder preservation](#-mfs-fixed-cidbuilder-preservation) + - [📂 FUSE Mount Improvements](#-fuse-mount-improvements) + - [📦 CARv2 import over HTTP 
API](#-carv2-import-over-http-api) + - [🐹 Go 1.26, Once More with Feeling](#-go-126-once-more-with-feeling) + - [📦️ Dependency updates](#-dependency-updates) +- [📝 Changelog](#-changelog) +- [👨‍👩‍👧‍👦 Contributors](#-contributors) + +### Overview + +### 🔦 Highlights + +#### 🗑️ Faster Provide Queue Disk Reclamation + +Nodes with a significant amount of data and DHT provide sweep enabled +(`Provide.DHT.SweepEnabled`, the default since Kubo 0.39) could see their +`datastore/` directory grow continuously. +Each reprovide cycle rewrote the provider keystore inside the shared repo +datastore, generating tombstones faster than the storage engine could compact +them, and in the default configuration Kubo was slow to reclaim this space. + +The provider keystore now lives in a dedicated datastore under +`$IPFS_PATH/provider-keystore/`. After each reprovide cycle the old datastore +is removed from disk entirely, so space is reclaimed immediately regardless +of storage backend. + +On first start after upgrading, stale keystore data is cleaned up from the +shared datastore automatically. + +To learn more, see [kubo#11096](https://github.com/ipfs/kubo/issues/11096), +[kubo#11198](https://github.com/ipfs/kubo/pull/11198), and +[go-libp2p-kad-dht#1233](https://github.com/libp2p/go-libp2p-kad-dht/pull/1233). + +#### ✨ New `ipfs cid inspect` command + +New subcommand for breaking down a CID into its components. Works offline, supports `--enc=json`. + +```console +$ ipfs cid inspect bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi +CID: bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi +Version: 1 +Multibase: base32 (b) +Multicodec: dag-pb (0x70) +Multihash: sha2-256 (0x12) + Length: 32 bytes + Digest: c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a +CIDv0: QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR +CIDv1: bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi +``` + +See `ipfs cid --help` for all CID-related commands. 
+ +#### 🔤 `--cid-base` fixes across all commands + +`--cid-base` is now respected by every command that outputs CIDs. Previously `block stat`, `block put`, `block rm`, `dag stat`, `refs local`, `pin remote`, and `files chroot` ignored the flag. + +CIDv0 values are now auto-upgraded to CIDv1 when a non-base58btc base is requested, because CIDv0 can only be represented in base58btc. + +#### 🔄 Built-in `ipfs update` command + +Kubo now ships with a built-in `ipfs update` command that downloads release binaries from GitHub and swaps the current one in place. It supersedes the external [`ipfs-update`](https://github.com/ipfs/ipfs-update) tool, deprecated since [v0.37](https://github.com/ipfs/kubo/blob/master/docs/changelogs/v0.37.md#-repository-migration-from-v16-to-v17-with-embedded-tooling). + +```console +$ ipfs update check +Update available: 0.40.0 -> 0.41.0 +Run 'ipfs update install' to install the latest version. +``` + +See `ipfs update --help` for the available subcommands (`check`, `versions`, `install`, `revert`, `clean`). + +#### 🖥️ WebUI Improvements + +IPFS Web UI has been updated to [v4.12.0](https://github.com/ipfs/ipfs-webui/releases/tag/v4.12.0). + +##### IPv6 peer geolocation and Peers screen optimizations + +The Peers screen now resolves IPv6 addresses to geographic locations, and the geolocation database has been updated to `GeoLite2-City-CSV_20260220`. ([ipfs-geoip v9.3.0](https://github.com/ipfs-shipyard/ipfs-geoip/releases/tag/v9.3.0)) + +Peer locations load faster thanks to UX optimizations in the underlying ipfs-geoip library. + +#### 🔧 Correct provider addresses for custom HTTP routing + +Nodes using custom routing (`Routing.Type=custom`) with [IPIP-526](https://github.com/ipfs/specs/pull/526) could end up publishing unresolved `0.0.0.0` addresses in provider records. Addresses are now resolved at provide-time, and when AutoNAT V2 has confirmed publicly reachable addresses, those are preferred automatically. 
See [#11213](https://github.com/ipfs/kubo/issues/11213). + +#### 🔀 `Provide.Strategy` modifiers: `+unique` and `+entities` + +Experimental opt-in optimizations for content providers with large repositories where multiple recursive pins share most of their DAG structure (e.g. append-only datasets, versioned archives like dist.ipfs.tech). + +- `+unique`: bloom filter dedup across recursive pins. Shared subtrees are traversed only once per reprovide cycle instead of once per pin, cutting I/O from O(pins * blocks) to O(unique blocks) at ~4 bytes/CID. +- `+entities`: announces only entity roots (files, directories, HAMT shards), skipping internal file chunks. Drastically fewer DHT provider records while keeping all content discoverable by file/directory CID. Implies `+unique`. + +Example: `Provide.Strategy = "pinned+mfs+entities"` + +The default `Provide.Strategy=all` is unchanged. See [`Provide.Strategy`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy) for configuration details and caveats. + +The bloom filter precision is tunable via [`Provide.BloomFPRate`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providebloomfprate) (default ~1 false positive per 4.75M lookups, ~4 bytes per CID). + +#### 📌 `pin add` and `pin update` now fast-provide root CID + +`ipfs pin add` and `ipfs pin update` announce the pinned root CID to the routing system immediately after pinning, same as `ipfs add` and `ipfs dag import`. This matters for selective strategies like `pinned+mfs`, where previously the root CID was not announced until the next reprovide cycle (see [`Provide.DHT.Interval`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtinterval)). With the default `Provide.Strategy=all`, the blockstore already provides every block on write, so this is a no-op. + +Both commands now accept `--fast-provide-root`, `--fast-provide-dag`, and `--fast-provide-wait` flags, matching `ipfs add` and `ipfs dag import`. 
See [`Import`](https://github.com/ipfs/kubo/blob/master/docs/config.md#import) for defaults and configuration. + +#### 🌳 New `--fast-provide-dag` flag for fine-tuned provide control + +Users with a custom [`Provide.Strategy`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy) (e.g. `pinned`, `pinned+mfs+entities`) now have finer control over which CIDs are announced immediately on `ipfs add`, `ipfs dag import`, `ipfs pin add`, and `ipfs pin update`. + +By default, only the root CID is provided right away (`--fast-provide-root=true`). Child blocks are deferred until the next [reprovide cycle](https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtinterval). This keeps bulk imports fast and avoids overwhelming online nodes with provide traffic. + +Pass `--fast-provide-dag=true` (or set [`Import.FastProvideDAG`](https://github.com/ipfs/kubo/blob/master/docs/config.md#importfastprovidedag)) to provide the full DAG immediately during add, using the active `Provide.Strategy` to determine scope. + +`Provide.Strategy=all` (default) is unaffected. It provides every block at the blockstore level regardless of this flag. + +> [!NOTE] +> **Faster default imports for `Provide.Strategy=pinned` and `pinned+mfs` users.** Previously, `ipfs add --pin` eagerly announced every block of newly added content as it was written, through an internal DAG service wrapper. This release consolidates add-time providing through the new `--fast-provide-dag` code path, which defaults to `false`. The out-of-the-box result is faster bulk imports and less provide traffic during add: only the root CID is announced immediately (via [`Import.FastProvideRoot`](https://github.com/ipfs/kubo/blob/master/docs/config.md#importfastprovideroot)), and child blocks are picked up by the next reprovide cycle (see [`Provide.DHT.Interval`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtinterval), default 22h). 
To restore the previous eager-provide behavior on `ipfs add`, set [`Import.FastProvideDAG=true`](https://github.com/ipfs/kubo/blob/master/docs/config.md#importfastprovidedag) (or pass `--fast-provide-dag=true` per command); the walker honors the active `Provide.Strategy`. `Provide.Strategy=all` (the default) is unaffected. + +#### 🛡️ Hardened `Provide.Strategy` parsing + +Unknown strategy tokens (e.g. typo `"uniuqe"`), malformed delimiters (`"pinned+"`), and invalid combinations (`"all+pinned"`) now produce a clear error at startup instead of being silently ignored. + +#### 🔧 Filestore now respects `Provide.Strategy` + +Blocks added via the [filestore](https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#ipfs-filestore) or [urlstore](https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#ipfs-urlstore) (`ipfs add --nocopy`) used to ignore [`Provide.Strategy`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy) and were always announced at write time. The filestore is now gated on the strategy the same way the regular blockstore is, so selective strategies get the same [fast-provide knobs](#-new---fast-provide-dag-flag-for-fine-tuned-provide-control) for filestore-backed content that they already had for regular `ipfs add`. + +#### 🛡️ `ipfs object patch` validates UnixFS node types + +As part of the ongoing deprecation of the legacy `ipfs object` API (which +predates HAMTShard directories and CIDv1), the `add-link` and `rm-link` +subcommands now validate the root node before mutating it. + +These commands operate at the raw `dag-pb` level and can only safely mutate +small, flat UnixFS directories. They are unable to update UnixFS metadata +(HAMT bitfields, file `Blocksizes`), so using them on files or sharded +directories would silently produce invalid DAGs. 
This is now rejected: + +- **File** nodes: rejected (corrupts `Blocksizes`, content lost on read-back) +- **HAMTShard** nodes: rejected (HAMT bitfield not updated, corrupts directory) +- **Non-UnixFS `dag-pb`** nodes: rejected by default +- **Directory** nodes: allowed (the only safe case) + +Use `ipfs files` commands (`mkdir`, `cp`, `rm`, `mv`) instead. They handle all +directory types correctly, including large sharded directories. + +A `--allow-non-unixfs` flag is available on both `ipfs object patch` commands to bypass validation. + +#### 🔗 MFS: fixed CidBuilder preservation + +`ipfs files` commands now correctly preserve the configured CID version and hash function (`Import.CidVersion`, `Import.HashFunction`) in all MFS operations. Previously, the `CidBuilder` could be silently lost when modifying file contents, creating nested directories with `mkdir -p`, or restarting the daemon, causing some entries to fall back to CIDv0/sha2-256. + +Additionally, the MFS root directory itself now respects [`Import.CidVersion`](https://github.com/ipfs/kubo/blob/master/docs/config.md#importcidversion) and [`Import.HashFunction`](https://github.com/ipfs/kubo/blob/master/docs/config.md#importhashfunction) at daemon startup. Before this fix, the root always used CIDv0/sha2-256 regardless of config. Because the MFS root CID format is now managed by these config options, `ipfs files chcid` no longer accepts the root path `/`. It continues to work on subdirectories. + +See [boxo#1125](https://github.com/ipfs/boxo/pull/1125) and [kubo#11273](https://github.com/ipfs/kubo/pull/11273). + +#### 📂 FUSE Mount Improvements + +The FUSE implementation has been rewritten on top of [`hanwen/go-fuse` v2](https://github.com/hanwen/go-fuse), replacing the unmaintained `bazil.org/fuse`. This fixes long-standing architectural limitations and brings FUSE mounts much closer to what standard tools expect. FUSE support is still experimental. 
See [docs/fuse.md](https://github.com/ipfs/kubo/blob/master/docs/fuse.md) for setup instructions, and report problems at [kubo/issues](https://github.com/ipfs/kubo/issues). + +- **`fsync` works.** Editors (vim, emacs) and databases that call `fsync` after writing no longer get a silent no-op. Data is flushed through the open file descriptor to the DAG. The full vim save sequence (O_TRUNC + write + fsync + chmod) is tested. +- **`ftruncate` works.** Tools like `rsync --inplace` that shrink or grow files via `ftruncate(fd, size)` no longer get ENOTSUP. Opening existing files with `O_TRUNC` also works correctly. +- **`chmod` and `touch` no longer drop file content.** Setting mode or mtime on a file with `Mounts.StoreMode`/`StoreMtime` enabled previously replaced the DAG node without preserving content links, making the file appear empty. +- **Symlink creation on writable mounts.** `ln -s target link` now works on `/mfs` and `/ipns`. Symlinks are stored as UnixFS TSymlink nodes, the same format used by `ipfs add`. +- **Rename-over-existing works.** Renaming a file onto an existing name (the pattern used by rsync and atomic-save editors) now correctly replaces the target. +- **Faster reads on `/ipfs`.** Files are read sequentially from the block graph instead of re-resolving from the root on every read call. +- **Killing a stuck `cat` works.** Interrupting a read (Ctrl-C, kill) cancels in-flight block fetches instead of hanging. +- **External unmount detected.** Running `fusermount -u` from outside the daemon now correctly marks the mount as inactive. +- **Files are no longer owned by root.** Mounts report the uid/gid of the daemon process, so access works without `allow_other`. +- **Offline IPNS writes succeed.** IPNS records are stored locally and published when connectivity returns. +- **Empty directories list correctly.** Listing an empty directory on `/ipfs` or `/ipns` no longer returns an error. 
+- **Bare file CIDs work on `/ipfs`.** Accessing a file by its CID directly under the `/ipfs` mount now works. This was a [long-standing regression](https://github.com/ipfs/kubo/issues/9044). +- **Rename works on `/mfs` and `/ipns`.** Renaming a file within the same directory no longer leaves the source behind. +- **IPNS FUSE publish works.** Writing files to `/ipns/local/` now correctly publishes the updated DAG. Previously IPNS publishing from the FUSE mount was silently blocked. +- **Concurrent IPNS file operations no longer race.** The `/ipns` file handle serializes Read, Write, Flush, and Release, matching the `/mfs` mount. +- **IPNS directory operations flush immediately.** Remove and Rename on `/ipns` flush changes to the MFS root, preventing data loss on daemon restart. +- **New files use the correct CID version.** Files created on `/ipns` inherit the parent's CID settings instead of falling back to CIDv0. +- **UnixFS mode and mtime visible in stat.** All three mounts show POSIX mode and mtime from [UnixFS](https://specs.ipfs.tech/unixfs/) metadata when present. When absent, sensible POSIX defaults are used (files: `0644`/`0444`, directories: `0755`/`0555`). +- **Opt-in `Mounts.StoreMtime` and `Mounts.StoreMode`.** Writable mounts can persist mtime on file creation/write and POSIX mode on `chmod` for both files and directories. `touch` on directories also works, which tools like `tar` and `rsync` rely on. Both flags are off by default because they change the resulting CID. See [`Mounts.StoreMtime`](https://github.com/ipfs/kubo/blob/master/docs/config.md#mountsstoremtime) and [`Mounts.StoreMode`](https://github.com/ipfs/kubo/blob/master/docs/config.md#mountsstoremode). +- **`ipfs.cid` xattr on all mounts.** All three mounts expose the node's CID via the `ipfs.cid` extended attribute on files and directories. The legacy `ipfs_cid` xattr name (used in earlier versions of `/mfs`) is no longer supported; use `ipfs.cid` instead. 
+- **`statfs` works.** All three mounts report the free space of the volume backing the local IPFS repo, so `/mfs` correctly reflects how much new data can be onboarded. Fixes macOS Finder refusing copies with "not enough free space". +- **Per-entry `st_blocks` and `st_blksize` reflect UnixFS.** All three mounts fill `st_blocks` from the UnixFS file size so `du`, `ls -s`, `stat`, and "size on disk" in file managers match `ls -l`. Directories report a nominal 1 block so tools that treat 0 as "unsupported" behave correctly. `st_blksize` advertises a chunk-aligned preferred I/O size: `/mfs` and `/ipns` use [`Import.UnixFSChunker`](https://github.com/ipfs/kubo/blob/master/docs/config.md#importunixfschunker), so `cp`, `dd`, and `rsync` buffer writes at the chunker boundary; `/ipfs` uses a stable 1 MiB hint since published CIDs have no single chunker. +- **Platform compatibility.** macOS detection updated from OSXFUSE 2.x to macFUSE 4.x. Linux no longer needs a `fusermount` symlink; [`hanwen/go-fuse`](https://github.com/hanwen/go-fuse) finds `fusermount3` natively. + +#### 📦 CARv2 import over HTTP API + +`ipfs dag import` of CARv2 files now works over the HTTP API. Previously it failed with `operation not supported` because the HTTP multipart stream falsely advertised seek support, which go-car relied on for CARv2 payload offset. See [#11253](https://github.com/ipfs/kubo/pull/11253). + +#### 🐹 Go 1.26, Once More with Feeling + +Kubo first shipped with [Go 1.26](https://go.dev/doc/go1.26) in v0.40.0, but [v0.40.1](https://github.com/ipfs/kubo/blob/master/docs/changelogs/v0.40.md#v0401) had to downgrade to Go 1.25 because of a Windows crash in Go's overlapped I/O layer ([#11214](https://github.com/ipfs/kubo/issues/11214)). Go 1.26.2 fixes that regression upstream ([golang/go#78041](https://github.com/golang/go/issues/78041)), so Kubo is back on Go 1.26 across all platforms. 
+ +You should see lower memory usage and reduced GC pauses thanks to the new Green Tea garbage collector (10-40% less GC overhead). Reading block data and API responses is faster due to `io.ReadAll` improvements (~2x faster, ~50% less memory). On 64-bit platforms, heap base address randomization adds a layer of security hardening. + +#### 📦️ Dependency updates + +- update `go-libp2p` to [v0.48.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.48.0) +- update `go-libp2p-kad-dht` to [v0.39.0](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.39.0) +- update `ipfs-webui` to [v4.12.0](https://github.com/ipfs/ipfs-webui/releases/tag/v4.12.0) +- update `gateway-conformance` tests to [v0.13](https://github.com/ipfs/gateway-conformance/releases/tag/v0.13.0) (incl. [v0.12](https://github.com/ipfs/gateway-conformance/releases/tag/v0.12.0), [v0.11](https://github.com/ipfs/gateway-conformance/releases/tag/v0.11.0)) +- update `boxo` to [v0.38.0](https://github.com/ipfs/boxo/releases/tag/v0.38.0) +- update `go-cid` to [v0.6.1](https://github.com/ipfs/go-cid/releases/tag/v0.6.1) (pulls in `go-multibase` [v0.3.0](https://github.com/multiformats/go-multibase/releases/tag/v0.3.0) with up to 5x faster base58 encoding for CIDv0) + +### 📝 Changelog + +### 👨‍👩‍👧‍👦 Contributors diff --git a/docs/changelogs/v0.42.md b/docs/changelogs/v0.42.md new file mode 100644 index 00000000000..17167733cda --- /dev/null +++ b/docs/changelogs/v0.42.md @@ -0,0 +1,22 @@ +# Kubo changelog v0.42 + + + +This release was brought to you by the [Shipyard](https://ipshipyard.com/) team. 
+ +- [v0.42.0](#v0420) + +## v0.42.0 + +- [Overview](#overview) +- [🔦 Highlights](#-highlights) +- [📝 Changelog](#-changelog) +- [👨‍👩‍👧‍👦 Contributors](#-contributors) + +### Overview + +### 🔦 Highlights + +### 📝 Changelog + +### 👨‍👩‍👧‍👦 Contributors diff --git a/docs/config.md b/docs/config.md index f6ea243a8ba..92d52275588 100644 --- a/docs/config.md +++ b/docs/config.md @@ -119,6 +119,8 @@ config file at runtime. - [`Mounts.IPNS`](#mountsipns) - [`Mounts.MFS`](#mountsmfs) - [`Mounts.FuseAllowOther`](#mountsfuseallowother) + - [`Mounts.StoreMtime`](#mountsstoremtime) + - [`Mounts.StoreMode`](#mountsstoremode) - [`Pinning`](#pinning) - [`Pinning.RemoteServices`](#pinningremoteservices) - [`Pinning.RemoteServices: API`](#pinningremoteservices-api) @@ -142,6 +144,7 @@ config file at runtime. - [`Provide.DHT.MaxProvideConnsPerWorker`](#providedhtmaxprovideconnsperworker) - [`Provide.DHT.KeystoreBatchSize`](#providedhtkeystorebatchsize) - [`Provide.DHT.OfflineDelay`](#providedhtofflinedelay) + - [`Provide.BloomFPRate`](#providebloomfprate) - [`Provider`](#provider) - [`Provider.Enabled`](#providerenabled) - [`Provider.Strategy`](#providerstrategy) @@ -236,6 +239,7 @@ config file at runtime. - [`Import.UnixFSChunker`](#importunixfschunker) - [`Import.HashFunction`](#importhashfunction) - [`Import.FastProvideRoot`](#importfastprovideroot) + - [`Import.FastProvideDAG`](#importfastprovidedag) - [`Import.FastProvideWait`](#importfastprovidewait) - [`Import.BatchMaxNodes`](#importbatchmaxnodes) - [`Import.BatchMaxSize`](#importbatchmaxsize) @@ -1158,7 +1162,7 @@ dag-pb or dag-cbor to dag-json). When disabled (the default), the gateway returns `406 Not Acceptable` for codec mismatches, following behavior specified in -[IPIP-524](https://github.com/ipfs/specs/pull/524). +[IPIP-524](https://specs.ipfs.tech/ipips/ipip-0524/). 
Most users should keep this disabled unless legacy [IPLD Logical Format](https://web.archive.org/web/20260204204727/https://ipld.io/specs/codecs/dag-pb/spec/#logical-format) @@ -1900,12 +1904,20 @@ Default: `"cache"` > [!CAUTION] > **EXPERIMENTAL:** -> This feature is disabled by default, requires an explicit opt-in with `ipfs mount` or `ipfs daemon --mount`. +> This feature is disabled by default, requires an explicit opt-in with `ipfs mount` or `ipfs daemon --mount`. > -> Read about current limitations at [fuse.md](./fuse.md). +> See [fuse.md](./fuse.md) for setup instructions and platform-specific notes. FUSE mount point configuration options. +All mounts expose the `ipfs.cid` extended attribute on files and directories, returning the CID of the underlying DAG node: + +```console +$ getfattr -n ipfs.cid /ipfs/bafybeiaysi4s6lnjev27ln5icwm6tueaw2vdykrtjkwiphwekaywqhcjze/wiki/Cat +# file: ipfs/bafybeiaysi4s6lnjev27ln5icwm6tueaw2vdykrtjkwiphwekaywqhcjze/wiki/Cat +ipfs.cid="bafybeihxislsmn7b2drh6m3vqz3ctcfae46al7ax3543umeso4f5jgij5e" +``` + ### `Mounts.IPFS` Mountpoint for `/ipfs/`. @@ -1937,7 +1949,31 @@ Type: `string` (filesystem path) ### `Mounts.FuseAllowOther` -Sets the 'FUSE allow-other' option on the mount point. +Sets the FUSE `allow_other` mount option, letting users other than the mounter access the mounted filesystem. + +Default: `false` + +Type: `flag` + +### `Mounts.StoreMtime` + +When `true`, writable mounts (`/ipns` and `/mfs`) store the current time as mtime in [UnixFS](https://specs.ipfs.tech/unixfs/) metadata when creating a file or opening it for writing. Setting mtime explicitly via `touch` works on both files and directories. This changes the resulting CID even when the file content is identical, because mtime is stored in the [root block of the UnixFS DAG](https://specs.ipfs.tech/unixfs/#dag-pb-optional-metadata). + +Most data on IPFS does not include mtime. 
When mtime is present in the UnixFS metadata, it is always shown in stat responses on all mounts, regardless of this flag. When absent, mtime is reported as zero (epoch). + +Default: `false` + +Type: `flag` + +### `Mounts.StoreMode` + +When `true`, writable mounts (`/ipns` and `/mfs`) accept `chmod` requests on both files and directories and persist POSIX permission bits in [UnixFS](https://specs.ipfs.tech/unixfs/) metadata. This changes the resulting CID because mode is stored in the [root block of the UnixFS DAG](https://specs.ipfs.tech/unixfs/#dag-pb-optional-metadata). + +Most data on IPFS does not include mode. When mode is present in the UnixFS metadata, it is always shown in stat responses on all mounts, regardless of this flag. When absent, a default mode is used (files: `0644` on writable mounts, `0444` on `/ipfs`; directories: `0755` on writable mounts, `0555` on `/ipfs`). + +Default: `false` + +Type: `flag` ## `Pinning` @@ -2056,31 +2092,74 @@ Type: `flag` ### `Provide.Strategy` -Tells the provide system what should be announced. Valid strategies are: +Controls which CIDs are announced to the content routing system. Valid strategies are: - `"all"` - announce all CIDs of stored blocks - `"pinned"` - only announce recursively pinned CIDs (`ipfs pin add -r`, both roots and child blocks) - Order: root blocks of direct and recursive pins are announced first, then the child blocks of recursive pins -- `"roots"` - only announce the root block of explicitly pinned CIDs (`ipfs pin add`) - - **⚠️ BE CAREFUL:** node with `roots` strategy will not announce child blocks. +- `"roots"` - only announce the top-level root CID of explicitly pinned DAGs (`ipfs pin add`) + - **⚠️ BE CAREFUL:** a node with `roots` strategy will not announce child blocks. 
It makes sense only for use cases where the entire DAG is fetched in full, and a graceful resume does not have to be guaranteed: the lack of child announcements means an interrupted retrieval won't be able to find providers for the missing block in the middle of a file, unless the peer happens to already be connected to a provider and asks for child CID over - bitswap. + bitswap. Does not traverse the DAG to discover sub-entity roots + (files within directories, HAMT shards, etc.). If you want that, use + `"pinned+entities"` instead. - `"mfs"` - announce only the local CIDs that are part of the MFS (`ipfs files`) - Note: MFS is lazy-loaded. Only the MFS blocks present in local datastore are announced. - `"pinned+mfs"` - a combination of the `pinned` and `mfs` strategies. - - **ℹ️ NOTE:** This is the suggested strategy for users who run without GC and don't want to provide everything in cache. - Order: first `pinned` and then the locally available part of `mfs`. -**Strategy changes automatically clear the provide queue.** When you change `Provide.Strategy` and restart Kubo, the provide queue is automatically cleared to ensure only content matching your new strategy is announced. You can also manually clear the queue using `ipfs provide clear`. +#### Strategy modifiers: `+unique` and `+entities` + +The `+unique` and `+entities` modifiers can be appended to `pinned`, `mfs`, or `pinned+mfs` strategies to optimize the reprovide cycle. They are incompatible with `"all"` and `"roots"`. + +- **`+unique`** -- uses a bloom filter to deduplicate CIDs across recursive + pins that share sub-DAGs. Without this, a node with 1000 pins that share 99% + of their content re-traverses the shared blocks for each pin. With `+unique`, + shared subtrees are detected and skipped, reducing traversal from + O(pins * total_blocks) to O(unique_blocks). This also significantly reduces + the amount of CIDs sent to the routing system when similar datasets are + pinned multiple times. 
+- **`+entities`** -- announces only entity roots (file roots, directory roots, + HAMT shard nodes) instead of every block. Internal file chunks are not + announced. This significantly reduces the number of provider records for + repositories with large files while keeping all files and directories + discoverable. Implies `+unique`. Non-UnixFS content (e.g. dag-cbor) is + still fully announced. + - **⚠️ BE CAREFUL:** since internal file chunks are not announced, resuming + an interrupted download from a specific byte offset or requesting a byte + range may not work unless the client is smart enough to find providers + for the entity root CID instead of the chunk CID. This is a work in + progress; see [kubo#10251](https://github.com/ipfs/kubo/issues/10251). + +**Suggested configurations:** + +- `"pinned+mfs+unique"` -- safe default for nodes with GC enabled, or desktop + users who don't want to announce all blocks cached in the local repository. + Handles pins of similar DAGs efficiently (e.g. versioned datasets where pins + are added and removed over time). +- `"pinned+mfs+entities"` -- same as above, but also skips internal file chunks + for even fewer provider records. Use when the `+entities` trade-off (no + chunk-level discoverability) is acceptable. -**Memory requirements:** +#### Memory during reprovide -- Reproviding larger pinsets using the `mfs`, `pinned`, `pinned+mfs` or `roots` strategies requires additional memory, with an estimated ~1 GiB of RAM per 20 million CIDs for reproviding to the Amino DHT. -- This is due to the use of a buffered provider, which loads all CIDs into memory to avoid holding a lock on the entire pinset during the reprovide cycle. +Reproviding larger pinsets using the `mfs`, `pinned`, `pinned+mfs` or `roots` strategies requires additional memory, with an estimated ~1 GiB of RAM per 20 million CIDs. 
This is due to the use of a buffered provider, which loads all CIDs into memory to avoid holding a lock on the entire pinset during the reprovide cycle. + +With `+unique` or `+entities`, a bloom filter replaces the in-memory CID set, significantly reducing memory usage: + +- 2M CIDs: ~150 MB (default) vs ~8 MB (with `+unique` bloom filter) +- 10M CIDs: ~750 MB (default) vs ~42 MB (with `+unique` bloom filter) +- 100M CIDs: ~7.5 GB (default) vs ~713 MB (with `+unique` bloom filter) + +The bloom auto-scales: the first cycle starts small and grows as needed; subsequent cycles size correctly from the previous cycle's count. + +#### Notes + +**Strategy changes automatically clear the provide queue.** When you change `Provide.Strategy` and restart Kubo, the provide queue is automatically cleared to ensure only content matching your new strategy is announced. You can also manually clear the queue using `ipfs provide clear`. Default: `"all"` @@ -2438,6 +2517,42 @@ Default: `2h` Type: `optionalDuration` +### `Provide.BloomFPRate` + +Target false positive rate for the bloom filter used by the [`+unique` and +`+entities` strategy modifiers](#strategy-modifiers-unique-and-entities) and +the matching `--fast-provide-dag` walk. Expressed as `1/N` (one false positive +per `N` lookups), so a higher value means a lower FP rate but more memory per +CID. Has no effect when `Provide.Strategy` does not include `+unique` or +`+entities`. + +The bloom filter sizes itself from the previous reprovide cycle's CID count +and the configured FP rate. The auto-scaling described in +[Memory during reprovide](#memory-during-reprovide) is unaffected; this +setting only changes the bits-per-CID ratio of each bloom in the chain. + +Memory tradeoff (approximate, before `ipfs/bbloom`'s power-of-two rounding): + +| `Provide.BloomFPRate` | Approx. 
FP rate | Bytes per CID | +|-----------------------|-----------------|---------------| +| `1000000` | 1 in 1M | ~3 | +| (default) | ~1 in 4.75M | ~4 | +| `10000000` | 1 in 10M | ~5 | +| `100000000` | 1 in 100M | ~6 | + +A false positive causes the walker to skip a CID it has already been told +about; the skipped CID is provided in the next reprovide cycle (see +[`Provide.DHT.Interval`](#providedhtinterval)). At the default rate, fewer +than ~21 CIDs per 100M are skipped per cycle. + +The minimum accepted value is `1000000` (1 in 1M). Below that the bloom +filter becomes lossy enough to drop a meaningful fraction of CIDs from each +reprovide cycle. + +Default: `4750000` (~1 false positive per 4.75M lookups, cost at ~4 bytes per CID) + +Type: `optionalInteger` + ## `Provider` ### `Provider.Enabled` @@ -2829,7 +2944,7 @@ It specifies the routing type that will be created. Currently supported types: -- `http` simple delegated routing based on HTTP protocol from [IPIP-337](https://github.com/ipfs/specs/pull/337) +- `http` simple delegated routing based on HTTP protocol from [IPIP-337](https://specs.ipfs.tech/ipips/ipip-0337/) - `dht` provides decentralized routing based on [libp2p's kad-dht](https://github.com/libp2p/specs/tree/master/kad-dht) - `parallel` and `sequential`: Helpers that can be used to run several routers sequentially or in parallel. @@ -3695,7 +3810,7 @@ Type: `flag` Options to configure the default parameters used for ingesting data, in commands such as `ipfs add` or `ipfs block put`. All affected commands are detailed per option. -These options implement [IPIP-499: UnixFS CID Profiles](https://github.com/ipfs/specs/pull/499) for reproducible CID generation across IPFS implementations. Instead of configuring individual options, you can apply a predefined profile with `ipfs config profile apply `. See [Profiles](#profiles) for available options like `unixfs-v1-2025`. 
+These options implement [IPIP-499: UnixFS CID Profiles](https://specs.ipfs.tech/ipips/ipip-0499/) for reproducible CID generation across IPFS implementations. Instead of configuring individual options, you can apply a predefined profile with `ipfs config profile apply <profile>`. See [Profiles](#profiles) for available options like `unixfs-v1-2025`.

Note that using CLI flags will override the options defined here.

@@ -3757,29 +3872,41 @@ Type: `optionalString`

### `Import.FastProvideRoot`

-Immediately provide root CIDs to the DHT in addition to the regular provide queue.
+Immediately provide root CIDs to the routing system in addition to the regular provide queue.

-This complements the sweep provider system: fast-provide handles the urgent case (root CIDs that users share and reference), while the sweep provider efficiently provides all blocks according to the `Provide.Strategy` over time. Together, they optimize for both immediate discoverability of newly imported content and efficient resource usage for complete DAG provides.
+This complements the reprovide system: fast-provide handles the urgent case (root CIDs that users share and reference), while the reprovide cycle provides all blocks according to the [`Provide.Strategy`](#providestrategy) over time.

-When disabled, only the sweep provider's queue is used.
+When disabled, only the reprovide cycle handles content announcement.

-This setting applies to both `ipfs add` and `ipfs dag import` commands and can be overridden per-command with the `--fast-provide-root` flag.
-
-Ignored when DHT is not available for routing (e.g., `Routing.Type=none` or delegated-only configurations).
+Applies to `ipfs add`, `ipfs dag import`, `ipfs pin add`, and `ipfs pin update`. Can be overridden per-command with the `--fast-provide-root` flag.
Default: `true` Type: `flag` +### `Import.FastProvideDAG` + +Walk and provide the full DAG immediately after content is added or pinned, using the active [`Provide.Strategy`](#providestrategy) to determine scope. + +When enabled with `+unique`, the DAG walk deduplicates via a bloom filter. When enabled with `+entities`, only entity roots (files, directories, HAMT shards) are provided. + +When disabled (default), only the root CID is provided immediately (via [`Import.FastProvideRoot`](#importfastprovideroot)) and child blocks are deferred to the reprovide cycle. + +Applies to `ipfs add`, `ipfs dag import`, `ipfs pin add`, and `ipfs pin update`. Can be overridden per-command with the `--fast-provide-dag` flag. Has no effect when `Provide.Strategy=all` (the blockstore already provides every block on write). + +Default: `false` + +Type: `flag` + ### `Import.FastProvideWait` -Wait for the immediate root CID provide to complete before returning. +Wait for the immediate provide to complete before returning. -When enabled, the command blocks until the provide completes, ensuring guaranteed discoverability before returning. When disabled (default), the provide happens asynchronously in the background without blocking the command. +When enabled, the command blocks until the provide completes, ensuring guaranteed discoverability before returning. When disabled (default), the provide happens asynchronously in the background without blocking the command. Applies to both [`Import.FastProvideRoot`](#importfastprovideroot) and [`Import.FastProvideDAG`](#importfastprovidedag). Use this when you need certainty that content is discoverable before the command returns (e.g., sharing a link immediately after adding). -This setting applies to both `ipfs add` and `ipfs dag import` commands and can be overridden per-command with the `--fast-provide-wait` flag. +Applies to `ipfs add`, `ipfs dag import`, `ipfs pin add`, and `ipfs pin update`. 
Can be overridden per-command with the `--fast-provide-wait` flag. Ignored when DHT is not available for routing (e.g., `Routing.Type=none` or delegated-only configurations). @@ -3858,7 +3985,7 @@ become too big or reach `MaxLinks`. A HAMT is a structure made of UnixFS nodes that store the list of elements in the folder. This option controls the maximum number of children that the HAMT nodes can have. -According to the [UnixFS specification](https://specs.ipfs.tech/unixfs/#hamt-structure-and-parameters), this value must be a power of 2, a multiple of 8 (for byte-aligned bitfields), and not exceed 1024 (to prevent denial-of-service attacks). +According to the [UnixFS specification](https://specs.ipfs.tech/unixfs/#hamt-structure-and-parameters), this value must be a power of 2, between 8 (for byte-aligned bitfields) and 1024 (to prevent denial-of-service attacks). Commands affected: `ipfs add`, `ipfs daemon` (globally overrides [`boxo/ipld/unixfs/io.DefaultShardWidth`](https://github.com/ipfs/boxo/blob/6c5a07602aed248acc86598f30ab61923a54a83e/ipld/unixfs/io/directory.go#L30C5-L30C22)) @@ -3902,7 +4029,7 @@ Accepted values: The `block` estimation is recommended for new profiles as it provides more accurate threshold decisions and better cross-implementation consistency. -See [IPIP-499](https://github.com/ipfs/specs/pull/499) for more details. +See [IPIP-499](https://specs.ipfs.tech/ipips/ipip-0499/) for more details. Commands affected: `ipfs add` @@ -4147,7 +4274,7 @@ See for exact [`Imp > [!NOTE] > Use only when legacy CIDs are required. For new projects, use [`unixfs-v1-2025`](#unixfs-v1-2025-profile). > -> See [IPIP-499](https://github.com/ipfs/specs/pull/499) for more details. +> See [IPIP-499](https://specs.ipfs.tech/ipips/ipip-0499/) for more details. ### `legacy-cid-v0` profile @@ -4164,7 +4291,7 @@ See for exact [`Imp > [!NOTE] > This profile ensures CID consistency across different IPFS implementations. 
> -> See [IPIP-499](https://github.com/ipfs/specs/pull/499) for more details. +> See [IPIP-499](https://specs.ipfs.tech/ipips/ipip-0499/) for more details. ## Security diff --git a/docs/content-blocking.md b/docs/content-blocking.md index e894868ace2..f02e01a7ace 100644 --- a/docs/content-blocking.md +++ b/docs/content-blocking.md @@ -6,7 +6,7 @@
-Kubo ships with built-in support for denylist format from [IPIP-383](https://github.com/ipfs/specs/pull/383).
+Kubo ships with built-in support for the denylist format from [IPIP-383](https://specs.ipfs.tech/ipips/ipip-0383/).

## Default behavior

@@ -41,7 +41,7 @@ caused the request to be blocked.

## Denylist file format

-[NOpfs](https://github.com/ipfs-shipyard/nopfs) supports the format from [IPIP-383](https://github.com/ipfs/specs/pull/383).
+[NOpfs](https://github.com/ipfs-shipyard/nopfs) supports the format from [IPIP-383](https://specs.ipfs.tech/ipips/ipip-0383/).

Clear-text rules are simple: just put content paths to block, one per line. Paths with unicode and whitespace need to be percent-encoded:

@@ -54,7 +54,7 @@ Paths with unicode and whitespace need to be percent-encoded:

Sensitive content paths can be double-hashed to block without revealing them. Double-hashed list example: https://badbits.dwebops.pub/badbits.deny

-See [IPIP-383](https://github.com/ipfs/specs/pull/383) for detailed format specification and more examples.
+See [IPIP-383](https://specs.ipfs.tech/ipips/ipip-0383/) for detailed format specification and more examples.

## How to suspend blocking without removing denylists

diff --git a/docs/environment-variables.md b/docs/environment-variables.md
index cd900de9415..733df1ec344 100644
--- a/docs/environment-variables.md
+++ b/docs/environment-variables.md
@@ -112,9 +112,9 @@ Warning: Enabling tracing will likely affect performance.

## `IPFS_FUSE_DEBUG`

-If SET, enables fuse debug logging.
+When set to any non-empty value, enables verbose FUSE debug logging. Every FUSE operation (open, read, write, lookup, getattr, etc.) is logged to stderr with its arguments and return values. Useful for diagnosing mount issues or understanding what the kernel is requesting.
-Default: false +Default: not set (no debug logging) ## `YAMUX_DEBUG` @@ -267,6 +267,28 @@ Reducing it slows down connection ballooning but might affect performance negati Default: [160](https://github.com/libp2p/go-libp2p/blob/master/p2p/net/swarm/swarm_dial.go#L91) (not set) +## `TEST_DHT_STUB` + +Lifts WAN DHT filters so kubo can operate against DHT peers on +loopback, enabling full end-to-end provide/findprovs/IPNS testing +without public internet access. All DHT code paths are exercised: +dial, protocol negotiation, message serialization, routing table +management. + +Filters removed on the WAN DHT when this variable is set: + +- `AddressFilter`: accepts loopback addresses (default rejects non-public) +- `QueryFilter`: accepts all peers (default rejects non-public) +- `RoutingTableFilter`: accepts all peers (default rejects non-public) +- `RoutingTablePeerDiversityFilter`: disabled (default caps same-IP peers to 3) + +In the CLI test harness, `h.BootstrapWithStubDHT(nodes)` spawns a +mini-DHT on the loopback interface and sets this variable on each +node automatically, allowing the loopback DHT to serve as a WAN +replacement. Tests do not need to set this variable externally. + +Default: disabled (not set) + # Tracing For tracing configuration, please check: https://github.com/ipfs/boxo/blob/main/docs/tracing.md diff --git a/docs/examples/kubo-as-a-library/go.mod b/docs/examples/kubo-as-a-library/go.mod index a0be7d15588..72b9839c469 100644 --- a/docs/examples/kubo-as-a-library/go.mod +++ b/docs/examples/kubo-as-a-library/go.mod @@ -1,20 +1,21 @@ module github.com/ipfs/kubo/examples/kubo-as-a-library -go 1.25.0 +go 1.26.2 // Used to keep this in sync with the current version of kubo. You should remove // this if you copy this example. replace github.com/ipfs/kubo => ./../../.. 
require ( - github.com/ipfs/boxo v0.37.0 + github.com/ipfs/boxo v0.38.0 github.com/ipfs/kubo v0.0.0-00010101000000-000000000000 - github.com/libp2p/go-libp2p v0.47.0 + github.com/libp2p/go-libp2p v0.48.0 github.com/multiformats/go-multiaddr v0.16.1 ) require ( - bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc // indirect + filippo.io/bigmod v0.1.1-0.20260103110540-f8a47775ebe5 // indirect + filippo.io/keygen v0.0.0-20260114151900-8e2790ea4c5b // indirect github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect github.com/DataDog/zstd v1.5.7 // indirect github.com/Jorropo/jsync v1.0.1 // indirect @@ -41,7 +42,7 @@ require ( github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf // indirect github.com/cskr/pubsub v1.0.2 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.1 // indirect github.com/dgraph-io/badger v1.6.2 // indirect github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/dunglas/httpsfv v1.1.0 // indirect @@ -65,24 +66,25 @@ require ( github.com/google/gopacket v1.1.19 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.3 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect github.com/guillaumemichel/reservedpool v0.3.0 // indirect + github.com/hanwen/go-fuse/v2 v2.9.1-0.20260323175136-8b5aa92e8e7c // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/ipfs-shipyard/nopfs v0.0.14 // indirect github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 // indirect - github.com/ipfs/bbloom v0.0.4 // indirect + github.com/ipfs/bbloom v0.1.0 // indirect github.com/ipfs/go-bitfield v1.1.0 // indirect github.com/ipfs/go-block-format v0.2.3 // indirect 
- github.com/ipfs/go-cid v0.6.0 // indirect + github.com/ipfs/go-cid v0.6.1 // indirect github.com/ipfs/go-cidutil v0.1.1 // indirect github.com/ipfs/go-datastore v0.9.1 // indirect github.com/ipfs/go-ds-badger v0.3.4 // indirect github.com/ipfs/go-ds-flatfs v0.6.0 // indirect github.com/ipfs/go-ds-leveldb v0.5.2 // indirect github.com/ipfs/go-ds-measure v0.2.2 // indirect - github.com/ipfs/go-ds-pebble v0.5.9 // indirect + github.com/ipfs/go-ds-pebble v0.5.10 // indirect github.com/ipfs/go-dsqueue v0.2.0 // indirect github.com/ipfs/go-fs-lock v0.1.1 // indirect github.com/ipfs/go-ipfs-cmds v0.16.0 // indirect @@ -92,12 +94,12 @@ require ( github.com/ipfs/go-ipld-cbor v0.2.1 // indirect github.com/ipfs/go-ipld-format v0.6.3 // indirect github.com/ipfs/go-ipld-git v0.1.1 // indirect - github.com/ipfs/go-ipld-legacy v0.2.2 // indirect + github.com/ipfs/go-ipld-legacy v0.3.0 // indirect github.com/ipfs/go-libdht v0.5.0 // indirect github.com/ipfs/go-log/v2 v2.9.1 // indirect github.com/ipfs/go-metrics-interface v0.3.0 // indirect github.com/ipfs/go-peertaskqueue v0.8.3 // indirect - github.com/ipfs/go-test v0.2.3 // indirect + github.com/ipfs/go-test v0.3.0 // indirect github.com/ipfs/go-unixfsnode v1.10.3 // indirect github.com/ipld/go-car/v2 v2.16.0 // indirect github.com/ipld/go-codec-dagpb v1.7.0 // indirect @@ -116,7 +118,7 @@ require ( github.com/libp2p/go-doh-resolver v0.5.0 // indirect github.com/libp2p/go-flow-metrics v0.3.0 // indirect github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect - github.com/libp2p/go-libp2p-kad-dht v0.38.0 // indirect + github.com/libp2p/go-libp2p-kad-dht v0.39.1-0.20260326020727-bcbc21e9f633 // indirect github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect github.com/libp2p/go-libp2p-pubsub v0.15.0 // indirect github.com/libp2p/go-libp2p-pubsub-router v0.6.0 // indirect @@ -136,12 +138,12 @@ require ( github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/minlz 
v1.0.1-0.20250507153514-87eb42fe8882 // indirect github.com/minio/sha256-simd v1.0.1 // indirect - github.com/mr-tron/base58 v1.2.0 // indirect + github.com/mr-tron/base58 v1.3.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-dns v0.5.0 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect - github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multibase v0.3.0 // indirect github.com/multiformats/go-multicodec v0.10.0 // indirect github.com/multiformats/go-multihash v0.2.3 // indirect github.com/multiformats/go-multistream v0.6.1 // indirect @@ -151,21 +153,18 @@ require ( github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect github.com/pion/datachannel v1.5.10 // indirect - github.com/pion/dtls/v2 v2.2.12 // indirect - github.com/pion/dtls/v3 v3.1.1 // indirect + github.com/pion/dtls/v3 v3.1.2 // indirect github.com/pion/ice/v4 v4.0.10 // indirect github.com/pion/interceptor v0.1.40 // indirect github.com/pion/logging v0.2.4 // indirect github.com/pion/mdns/v2 v2.0.7 // indirect github.com/pion/randutil v0.1.0 // indirect - github.com/pion/rtcp v1.2.15 // indirect + github.com/pion/rtcp v1.2.16 // indirect github.com/pion/rtp v1.8.19 // indirect github.com/pion/sctp v1.8.39 // indirect - github.com/pion/sdp/v3 v3.0.13 // indirect + github.com/pion/sdp/v3 v3.0.18 // indirect github.com/pion/srtp/v3 v3.0.6 // indirect - github.com/pion/stun v0.6.1 // indirect - github.com/pion/stun/v3 v3.0.0 // indirect - github.com/pion/transport/v2 v2.2.10 // indirect + github.com/pion/stun/v3 v3.1.1 // indirect github.com/pion/transport/v3 v3.0.7 // indirect github.com/pion/transport/v4 v4.0.1 // indirect github.com/pion/turn/v4 v4.0.2 // indirect @@ -174,8 +173,8 @@ require ( github.com/polydawn/refmt 
v0.89.1-0.20231129105047-37766d95467a // indirect github.com/prometheus/client_golang v1.23.2 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.66.1 // indirect - github.com/prometheus/procfs v0.17.0 // indirect + github.com/prometheus/common v0.67.5 // indirect + github.com/prometheus/procfs v0.20.1 // indirect github.com/quic-go/qpack v0.6.0 // indirect github.com/quic-go/quic-go v0.59.0 // indirect github.com/quic-go/webtransport-go v0.10.0 // indirect @@ -193,15 +192,15 @@ require ( github.com/zeebo/blake3 v0.2.4 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect - go.opentelemetry.io/otel v1.40.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0 // indirect - go.opentelemetry.io/otel/metric v1.40.0 // indirect - go.opentelemetry.io/otel/sdk v1.40.0 // indirect - go.opentelemetry.io/otel/trace v1.40.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 // indirect + go.opentelemetry.io/otel v1.42.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.42.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.42.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.42.0 // indirect + go.opentelemetry.io/otel/metric v1.42.0 // indirect + go.opentelemetry.io/otel/sdk v1.42.0 // indirect + go.opentelemetry.io/otel/trace v1.42.0 // indirect go.opentelemetry.io/proto/otlp v1.9.0 // indirect go.uber.org/dig v1.19.0 // indirect go.uber.org/fx v1.24.0 // 
indirect @@ -209,23 +208,23 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.1 // indirect go.uber.org/zap/exp v0.3.0 // indirect - go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v2 v2.4.4 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect - golang.org/x/crypto v0.48.0 // indirect - golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a // indirect - golang.org/x/mod v0.33.0 // indirect - golang.org/x/net v0.50.0 // indirect - golang.org/x/sync v0.19.0 // indirect - golang.org/x/sys v0.41.0 // indirect - golang.org/x/telemetry v0.0.0-20260209163413-e7419c687ee4 // indirect - golang.org/x/text v0.34.0 // indirect + golang.org/x/crypto v0.50.0 // indirect + golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 // indirect + golang.org/x/mod v0.34.0 // indirect + golang.org/x/net v0.52.0 // indirect + golang.org/x/sync v0.20.0 // indirect + golang.org/x/sys v0.43.0 // indirect + golang.org/x/telemetry v0.0.0-20260311193753-579e4da9a98c // indirect + golang.org/x/text v0.36.0 // indirect golang.org/x/time v0.12.0 // indirect - golang.org/x/tools v0.42.0 // indirect + golang.org/x/tools v0.43.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect gonum.org/v1/gonum v0.17.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect - google.golang.org/grpc v1.78.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 // indirect + google.golang.org/grpc v1.79.2 // indirect google.golang.org/protobuf v1.36.11 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.4.1 // indirect diff --git a/docs/examples/kubo-as-a-library/go.sum b/docs/examples/kubo-as-a-library/go.sum index 969cf1187db..c6a9a22a75b 100644 --- 
a/docs/examples/kubo-as-a-library/go.sum +++ b/docs/examples/kubo-as-a-library/go.sum @@ -1,5 +1,3 @@ -bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc h1:utDghgcjE8u+EBjHOgYT+dJPcnDF05KqWMBcjuJy510= -bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -39,6 +37,10 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/bigmod v0.1.1-0.20260103110540-f8a47775ebe5 h1:JA0fFr+kxpqTdxR9LOBiTWpGNchqmkcsgmdeJZRclZ0= +filippo.io/bigmod v0.1.1-0.20260103110540-f8a47775ebe5/go.mod h1:OjOXDNlClLblvXdwgFFOQFJEocLhhtai8vGLy0JCZlI= +filippo.io/keygen v0.0.0-20260114151900-8e2790ea4c5b h1:REI1FbdW71yO56Are4XAxD+OS/e+BQsB3gE4mZRQEXY= +filippo.io/keygen v0.0.0-20260114151900-8e2790ea4c5b/go.mod h1:9nnw1SlYHYuPSo/3wjQzNjSbeHlq2NsKo5iEtfJPWP0= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= @@ -86,6 +88,8 @@ github.com/caddyserver/certmagic v0.23.0 h1:CfpZ/50jMfG4+1J/u2LV6piJq4HOfO6ppOnO github.com/caddyserver/certmagic v0.23.0/go.mod h1:9mEZIWqqWoI+Gf+4Trh04MOVPD0tGSxtqsxg87hAIH4= github.com/caddyserver/zerossl v0.1.3 
h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+YTAyA= github.com/caddyserver/zerossl v0.1.3/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4= +github.com/canonical/go-sp800.90a-drbg v0.0.0-20210314144037-6eeb1040d6c3 h1:oe6fCvaEpkhyW3qAicT0TnGtyht/UrgvOwMcEgLb7Aw= +github.com/canonical/go-sp800.90a-drbg v0.0.0-20210314144037-6eeb1040d6c3/go.mod h1:qdP0gaj0QtgX2RUZhnlVrceJ+Qln8aSlDyJwelLLFeM= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= @@ -144,8 +148,8 @@ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.1 h1:5RVFMOWjMyRy8cARdy79nAmgYw3hK/4HUq48LQ6Wwqo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.1/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= @@ -304,12 +308,14 @@ github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWS github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod 
h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c= github.com/guillaumemichel/reservedpool v0.3.0 h1:eqqO/QvTllLBrit7LVtVJBqw4cD0WdV9ajUe7WNTajw= github.com/guillaumemichel/reservedpool v0.3.0/go.mod h1:sXSDIaef81TFdAJglsCFCMfgF5E5Z5xK1tFhjDhvbUc= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/hanwen/go-fuse/v2 v2.9.1-0.20260323175136-8b5aa92e8e7c h1:m4bneA0dtaIhyTOJZCvcka670ZwDEiSomj5EARK1Jxc= +github.com/hanwen/go-fuse/v2 v2.9.1-0.20260323175136-8b5aa92e8e7c/go.mod h1:yE6D2PqWwm3CbYRxFXV9xUd8Md5d6NG0WBs5spCswmI= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -344,10 +350,10 @@ github.com/ipfs-shipyard/nopfs v0.0.14 h1:HFepJt/MxhZ3/GsLZkkAPzIPdNYKaLO1Qb7YmP github.com/ipfs-shipyard/nopfs v0.0.14/go.mod h1:mQyd0BElYI2gB/kq/Oue97obP4B3os4eBmgfPZ+hnrE= github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 h1:OqNqsGZPX8zh3eFMO8Lf8EHRRnSGBMqcdHUd7SDsUOY= github.com/ipfs-shipyard/nopfs/ipfs v0.25.0/go.mod h1:BxhUdtBgOXg1B+gAPEplkg/GpyTZY+kCMSfsJvvydqU= -github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= -github.com/ipfs/bbloom v0.0.4/go.mod 
h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= -github.com/ipfs/boxo v0.37.0 h1:2E3mZvydMI2t5IkAgtkmZ3sGsld0oS7o3I+xyzDk6uI= -github.com/ipfs/boxo v0.37.0/go.mod h1:8yyiRn54F2CsW13n0zwXEPrVsZix/gFj9SYIRYMZ6KE= +github.com/ipfs/bbloom v0.1.0 h1:nIWwfIE3AaG7RCDQIsrUonGCOTp7qSXzxH7ab/ss964= +github.com/ipfs/bbloom v0.1.0/go.mod h1:lDy3A3i6ndgEW2z1CaRFvDi5/ZTzgM1IxA/pkL7Wgts= +github.com/ipfs/boxo v0.38.0 h1:Kt/swuNXAtVXs7EP6KEjB5+2lo5/tTrvWzjakQ8IiOo= +github.com/ipfs/boxo v0.38.0/go.mod h1:A6DRpImSXihx6MiEHOeBjXqleDqK5JX3yWDxM0WygPo= github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU= github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= @@ -356,8 +362,8 @@ github.com/ipfs/go-block-format v0.2.3/go.mod h1:WJaQmPAKhD3LspLixqlqNFxiZ3BZ3xg github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/ipfs/go-cid v0.6.0 h1:DlOReBV1xhHBhhfy/gBNNTSyfOM6rLiIx9J7A4DGf30= -github.com/ipfs/go-cid v0.6.0/go.mod h1:NC4kS1LZjzfhK40UGmpXv5/qD2kcMzACYJNntCUiDhQ= +github.com/ipfs/go-cid v0.6.1 h1:T5TnNb08+ueovG76Z5gx1L4Y7QOaGTXHg1F6raWFxIc= +github.com/ipfs/go-cid v0.6.1/go.mod h1:zrY0SwOhjrrIdfPQ/kf+k1sXyJ0QE7cMxfCployLBs0= github.com/ipfs/go-cidutil v0.1.1 h1:COuby6H8C2ml0alvHYX3WdbFM4F07YtbY0UlT5j+sgI= github.com/ipfs/go-cidutil v0.1.1/go.mod h1:SCoUftGEUgoXe5Hjeyw5CiLZF8cwYn/TbtpFQXJCP6k= github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= @@ -376,8 +382,8 @@ github.com/ipfs/go-ds-leveldb v0.5.2 h1:6nmxlQ2zbp4LCNdJVsmHfs9GP0eylfBNxpmY1csp github.com/ipfs/go-ds-leveldb v0.5.2/go.mod h1:2fAwmcvD3WoRT72PzEekHBkQmBDhc39DJGoREiuGmYo= github.com/ipfs/go-ds-measure v0.2.2 
h1:4kwvBGbbSXNYe4ANlg7qTIYoZU6mNlqzQHdVqICkqGI= github.com/ipfs/go-ds-measure v0.2.2/go.mod h1:b/87ak0jMgH9Ylt7oH0+XGy4P8jHx9KG09Qz+pOeTIs= -github.com/ipfs/go-ds-pebble v0.5.9 h1:D1FEuMxjbEmDADNqsyT74n9QHVAn12nv9i9Qa15AFYc= -github.com/ipfs/go-ds-pebble v0.5.9/go.mod h1:XmUBN05l6B+tMg7mpMS75ZcKW/CX01uZMhhWw85imQA= +github.com/ipfs/go-ds-pebble v0.5.10 h1:MsSPrq4ubtaWGaIvdE5+L227wEaoxs7nWEb6+lKojNE= +github.com/ipfs/go-ds-pebble v0.5.10/go.mod h1:ShbyLsills0WD9TJavOHu7uEDj/LwDW1WW91G4+W4X8= github.com/ipfs/go-dsqueue v0.2.0 h1:MBi9w3oSiX98Xc+Y7NuJ9G8MI6mAT4IGdO9dHEMCZzU= github.com/ipfs/go-dsqueue v0.2.0/go.mod h1:8FfNQC4DMF/KkzBXRNB9Rb3MKDW0Sh98HMtXYl1mLQE= github.com/ipfs/go-fs-lock v0.1.1 h1:TecsP/Uc7WqYYatasreZQiP9EGRy4ZnKoG4yXxR33nw= @@ -401,8 +407,8 @@ github.com/ipfs/go-ipld-format v0.6.3 h1:9/lurLDTotJpZSuL++gh3sTdmcFhVkCwsgx2+rA github.com/ipfs/go-ipld-format v0.6.3/go.mod h1:74ilVN12NXVMIV+SrBAyC05UJRk0jVvGqdmrcYZvCBk= github.com/ipfs/go-ipld-git v0.1.1 h1:TWGnZjS0htmEmlMFEkA3ogrNCqWjIxwr16x1OsdhG+Y= github.com/ipfs/go-ipld-git v0.1.1/go.mod h1:+VyMqF5lMcJh4rwEppV0e6g4nCCHXThLYYDpKUkJubI= -github.com/ipfs/go-ipld-legacy v0.2.2 h1:DThbqCPVLpWBcGtU23KDLiY2YRZZnTkXQyfz8aOfBkQ= -github.com/ipfs/go-ipld-legacy v0.2.2/go.mod h1:hhkj+b3kG9b2BcUNw8IFYAsfeNo8E3U7eYlWeAOPyDU= +github.com/ipfs/go-ipld-legacy v0.3.0 h1:7XhFKkRyCvP5upOlQfKUFIqL3S5DEZnbUE4bQmQ/tNE= +github.com/ipfs/go-ipld-legacy v0.3.0/go.mod h1:Ukef9ARQiX+RVetwH2XiReLgJvQDEXcUPszrZ1KRjKI= github.com/ipfs/go-libdht v0.5.0 h1:ZN+eCqwahZvUeT0e4DsIxRtm78Mc9UR5tmZUiMsrGjQ= github.com/ipfs/go-libdht v0.5.0/go.mod h1:L3YiuFXecLeZZFuuVRM0hjg1GgVhARzUdahFsuqSa7w= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= @@ -412,8 +418,8 @@ github.com/ipfs/go-metrics-interface v0.3.0 h1:YwG7/Cy4R94mYDUuwsBfeziJCVm9pBMJ6 github.com/ipfs/go-metrics-interface v0.3.0/go.mod h1:OxxQjZDGocXVdyTPocns6cOLwHieqej/jos7H4POwoY= github.com/ipfs/go-peertaskqueue v0.8.3 
h1:tBPpGJy+A92RqtRFq5amJn0Uuj8Pw8tXi0X3eHfHM8w= github.com/ipfs/go-peertaskqueue v0.8.3/go.mod h1:OqVync4kPOcXEGdj/LKvox9DCB5mkSBeXsPczCxLtYA= -github.com/ipfs/go-test v0.2.3 h1:Z/jXNAReQFtCYyn7bsv/ZqUwS6E7iIcSpJ2CuzCvnrc= -github.com/ipfs/go-test v0.2.3/go.mod h1:QW8vSKkwYvWFwIZQLGQXdkt9Ud76eQXRQ9Ao2H+cA1o= +github.com/ipfs/go-test v0.3.0 h1:0Y4Uve3tp9HI+2lIJjfOliOrOgv/YpXg/l1y3P4DEYE= +github.com/ipfs/go-test v0.3.0/go.mod h1:JK+U8pRpATZb7lsYNSJlCj3WYB3cFfWIbI6nWRM/GFk= github.com/ipfs/go-unixfsnode v1.10.3 h1:c8sJjuGNkxXAQH75P+f5ngPda/9T+DrboVA0TcDGvGI= github.com/ipfs/go-unixfsnode v1.10.3/go.mod h1:2Jlc7DoEwr12W+7l8Hr6C7XF4NHST3gIkqSArLhGSxU= github.com/ipld/go-car/v2 v2.16.0 h1:LWe0vmN/QcQmUU4tr34W5Nv5mNraW+G6jfN2s+ndBco= @@ -466,6 +472,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/libdns/libdns v1.0.0-beta.1 h1:KIf4wLfsrEpXpZ3vmc/poM8zCATXT2klbdPe6hyOBjQ= github.com/libdns/libdns v1.0.0-beta.1/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ= github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= @@ -480,14 +488,14 @@ github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZ github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784= github.com/libp2p/go-flow-metrics v0.3.0/go.mod h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo= -github.com/libp2p/go-libp2p v0.47.0 h1:qQpBjSCWNQFF0hjBbKirMXE9RHLtSuzTDkTfr1rw0yc= 
-github.com/libp2p/go-libp2p v0.47.0/go.mod h1:s8HPh7mMV933OtXzONaGFseCg/BE//m1V34p3x4EUOY= +github.com/libp2p/go-libp2p v0.48.0 h1:h2BrLAgrj7X8bEN05K7qmrjpNHYA+6tnsGRdprjTnvo= +github.com/libp2p/go-libp2p v0.48.0/go.mod h1:Q1fBZNdmC2Hf82husCTfkKJVfHm2we5zk+NWmOGEmWk= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= -github.com/libp2p/go-libp2p-kad-dht v0.38.0 h1:NToFzwvICo6ghDfSwuTmROCtl9LDXSZT1VawEbm4NUs= -github.com/libp2p/go-libp2p-kad-dht v0.38.0/go.mod h1:g/CefQilAnCMyUH52A6tUGbe17NgQ8q26MaZCA968iI= +github.com/libp2p/go-libp2p-kad-dht v0.39.1-0.20260326020727-bcbc21e9f633 h1:PcubpdBr1BBg39st+CqGp3EOX++DOBK6B/s07P31eMg= +github.com/libp2p/go-libp2p-kad-dht v0.39.1-0.20260326020727-bcbc21e9f633/go.mod h1:Po2JugFEkDq9Vig/JXtc153ntOi0q58o4j7IuITCOVs= github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio= github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s= github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4= @@ -562,14 +570,17 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= +github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= 
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.3.0 h1:K6Y13R2h+dku0wOqKtecgRnBUBPrZzLZy5aIj8lCcJI= +github.com/mr-tron/base58 v1.3.0/go.mod h1:2BuubE67DCSWwVfx37JWNG8emOC0sHEU4/HpcYgCLX8= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= @@ -588,8 +599,8 @@ github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDu github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= -github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= -github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= +github.com/multiformats/go-multibase v0.3.0 h1:8helZD2+4Db7NNWFiktk2NePbF0boolBe6bDQvM4r68= +github.com/multiformats/go-multibase v0.3.0/go.mod 
h1:MoBLQPCkRTOL3eveIPO81860j2AQY8JwcnNlRkGRUfI= github.com/multiformats/go-multicodec v0.10.0 h1:UpP223cig/Cx8J76jWt91njpK3GTAO1w02sdcjZDSuc= github.com/multiformats/go-multicodec v0.10.0/go.mod h1:wg88pM+s2kZJEQfRCKBNU+g32F5aWBEjyFHXvZLTcLI= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= @@ -642,40 +653,30 @@ github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4 github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o= github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M= -github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= -github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= -github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/dtls/v3 v3.1.1 h1:wSLMam9Kf7DL1A74hnqRvEb9OT+aXPAsQ5VS+BdXOJ0= -github.com/pion/dtls/v3 v3.1.1/go.mod h1:7FGvVYpHsUV6+aywaFpG7aE4Vz8nBOx74odPRFue6cI= +github.com/pion/dtls/v3 v3.1.2 h1:gqEdOUXLtCGW+afsBLO0LtDD8GnuBBjEy6HRtyofZTc= +github.com/pion/dtls/v3 v3.1.2/go.mod h1:Hw/igcX4pdY69z1Hgv5x7wJFrUkdgHwAn/Q/uo7YHRo= github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4= github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw= github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4= github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic= -github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= github.com/pion/logging v0.2.4 h1:tTew+7cmQ+Mc1pTBLKH2puKsOvhm32dROumOZ655zB8= github.com/pion/logging v0.2.4/go.mod h1:DffhXTKYdNZU+KtJ5pyQDjvOAh/GsNSyv1lbkFbe3so= github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM= github.com/pion/mdns/v2 
v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA= github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= -github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo= -github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0= +github.com/pion/rtcp v1.2.16 h1:fk1B1dNW4hsI78XUCljZJlC4kZOPk67mNRuQ0fcEkSo= +github.com/pion/rtcp v1.2.16/go.mod h1:/as7VKfYbs5NIb4h6muQ35kQF/J0ZVNz2Z3xKoCBYOo= github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c= github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk= github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE= github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE= -github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4= -github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E= +github.com/pion/sdp/v3 v3.0.18 h1:l0bAXazKHpepazVdp+tPYnrsy9dfh7ZbT8DxesH5ZnI= +github.com/pion/sdp/v3 v3.0.18/go.mod h1:ZREGo6A9ZygQ9XkqAj5xYCQtQpif0i6Pa81HOiAdqQ8= github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4= github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY= -github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= -github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= -github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw= -github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU= -github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= -github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= -github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= 
-github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= +github.com/pion/stun/v3 v3.1.1 h1:CkQxveJ4xGQjulGSROXbXq94TAWu8gIX2dT+ePhUkqw= +github.com/pion/stun/v3 v3.1.1/go.mod h1:qC1DfmcCTQjl9PBaMa5wSn3x9IPmKxSdcCsxBcDBndM= github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= github.com/pion/transport/v4 v4.0.1 h1:sdROELU6BZ63Ab7FrOLn13M6YdJLY20wldXW2Cu2k8o= @@ -701,10 +702,10 @@ github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UH github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= -github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= -github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= -github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4= +github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw= +github.com/prometheus/procfs v0.20.1 h1:XwbrGOIplXW/AU3YhIhLODXMJYyC1isLFfYCsTEycfc= +github.com/prometheus/procfs v0.20.1/go.mod h1:o9EMBZGRyvDrSPH1RqdxhojkuXstoe4UlK79eF5TGGo= github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8= github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII= github.com/quic-go/quic-go v0.59.0 h1:OLJkp1Mlm/aS7dpKgTc6cnpynnD2Xg7C1pwL6vy/SAw= @@ -769,7 +770,6 @@ github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= @@ -778,8 +778,6 @@ github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69 github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= -github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= -github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb h1:Ywfo8sUltxogBpFuMOFRrrSifO788kAFxmvVw31PtQQ= github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= @@ -803,7 +801,6 @@ github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 
h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= -github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= @@ -835,26 +832,26 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= -go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= -go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 h1:DvJDOPmSWQHWywQS6lKL+pb8s3gBLOZUtw4N+mavW1I= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0/go.mod h1:EtekO9DEJb4/jRyN4v4Qjc2yA7AtfCBuz2FynRUWTXs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 h1:wVZXIWjQSeSmMoxF74LzAnpVQOAFDo3pPji9Y4SOFKc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0/go.mod 
h1:khvBS2IggMFNwZK/6lEeHg/W57h/IX6J4URh57fuI40= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0 h1:MzfofMZN8ulNqobCmCAVbqVL5syHw+eB2qPRkCMA/fQ= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0/go.mod h1:E73G9UFtKRXrxhBsHtG00TB5WxX57lpsQzogDkqBTz8= -go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= -go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= -go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= -go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= -go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= -go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= -go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= -go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 h1:OyrsyzuttWTSur2qN/Lm0m2a8yqyIjUVBZcxFPuXq2o= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0/go.mod h1:C2NGBr+kAB4bk3xtMXfZ94gqFDtg/GkI7e9zqGh5Beg= +go.opentelemetry.io/otel v1.42.0 h1:lSQGzTgVR3+sgJDAU/7/ZMjN9Z+vUip7leaqBKy4sho= +go.opentelemetry.io/otel v1.42.0/go.mod h1:lJNsdRMxCUIWuMlVJWzecSMuNjE7dOYyWlqOXWkdqCc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0 h1:THuZiwpQZuHPul65w4WcwEnkX2QIuMT+UFoOrygtoJw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0/go.mod h1:J2pvYM5NGHofZ2/Ru6zw/TNWnEQp5crgyDeSrYpXkAw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.42.0 h1:zWWrB1U6nqhS/k6zYB74CjRpuiitRtLLi68VcgmOEto= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.42.0/go.mod h1:2qXPNBX1OVRC0IwOnfo1ljoid+RD0QK3443EaqVlsOU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.42.0 
h1:uLXP+3mghfMf7XmV4PkGfFhFKuNWoCvvx5wP/wOXo0o= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.42.0/go.mod h1:v0Tj04armyT59mnURNUJf7RCKcKzq+lgJs6QSjHjaTc= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.42.0 h1:s/1iRkCKDfhlh1JF26knRneorus8aOwVIDhvYx9WoDw= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.42.0/go.mod h1:UI3wi0FXg1Pofb8ZBiBLhtMzgoTm1TYkMvn71fAqDzs= +go.opentelemetry.io/otel/metric v1.42.0 h1:2jXG+3oZLNXEPfNmnpxKDeZsFI5o4J+nz6xUlaFdF/4= +go.opentelemetry.io/otel/metric v1.42.0/go.mod h1:RlUN/7vTU7Ao/diDkEpQpnz3/92J9ko05BIwxYa2SSI= +go.opentelemetry.io/otel/sdk v1.42.0 h1:LyC8+jqk6UJwdrI/8VydAq/hvkFKNHZVIWuslJXYsDo= +go.opentelemetry.io/otel/sdk v1.42.0/go.mod h1:rGHCAxd9DAph0joO4W6OPwxjNTYWghRWmkHuGbayMts= +go.opentelemetry.io/otel/sdk/metric v1.42.0 h1:D/1QR46Clz6ajyZ3G8SgNlTJKBdGp84q9RKCAZ3YGuA= +go.opentelemetry.io/otel/sdk/metric v1.42.0/go.mod h1:Ua6AAlDKdZ7tdvaQKfSmnFTdHx37+J4ba8MwVCYM5hc= +go.opentelemetry.io/otel/trace v1.42.0 h1:OUCgIPt+mzOnaUTpOQcBiM/PLQ/Op7oq6g4LenLmOYY= +go.opentelemetry.io/otel/trace v1.42.0/go.mod h1:f3K9S+IFqnumBkKhRJMeaZeNk9epyhnCmQh/EysQCdc= go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -874,8 +871,8 @@ go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U= go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ= -go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= -go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v2 v2.4.4 h1:tuyd0P+2Ont/d6e2rl3be67goVK4R6deVxCUX5vyPaQ= +go.yaml.in/yaml/v2 v2.4.4/go.mod 
h1:gMZqIpDtDqOfM0uNfy0SkpRhvUryYH0Z6wdMYcacYXQ= go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc= go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -895,11 +892,8 @@ golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= -golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= +golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI= +golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -910,8 +904,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= 
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a h1:ovFr6Z0MNmU7nH8VaX5xqw+05ST2uO1exVfZPVqRC5o= -golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA= +golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 h1:jiDhWWeC7jfWqR9c/uplMOqJ0sbNlNWv0UkzE0vX1MA= +golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90/go.mod h1:xE1HEv6b+1SCZ5/uscMRjUBKtIxworgEcEi+/n9NQDQ= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -940,8 +934,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= +golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -989,12 +983,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.6.0/go.mod 
h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60= -golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM= +golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= +golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1020,8 +1010,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= -golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1045,7 +1035,6 @@ golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1088,23 +1077,15 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= -golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/telemetry v0.0.0-20260209163413-e7419c687ee4 
h1:bTLqdHv7xrGlFbvf5/TXNxy/iUwwdkjhqQTJDjW7aj0= -golang.org/x/telemetry v0.0.0-20260209163413-e7419c687ee4/go.mod h1:g5NllXBEermZrmR51cJDQxmJUHUOfRAaNyWBM+R+548= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/telemetry v0.0.0-20260311193753-579e4da9a98c h1:6a8FdnNk6bTXBjR4AGKFgUKuo+7GnR3FX5L7CbveeZc= +golang.org/x/telemetry v0.0.0-20260311193753-579e4da9a98c/go.mod h1:TpUTTEp9frx7rTdLpC9gFG9kdI7zVLFTFFlqaH2Cncw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1116,11 +1097,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= 
-golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= -golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= +golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg= +golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1185,8 +1163,8 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= -golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k= -golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0= +golang.org/x/tools v0.43.0 h1:12BdW9CeB3Z+J/I/wj34VMl8X+fEXBxVR90JeMX5E7s= +golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1266,10 +1244,10 @@ google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6D 
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M= -google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= +google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 h1:JLQynH/LBHfCTSbDWl+py8C+Rg/k1OVH3xfcaiANuF0= +google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:kSJwQxqmFXeo79zOmbrALdflXQeAYcUbgS7PbpMknCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 h1:mWPCjDEyshlQYzBpMNHaEof6UX1PmHcaUODUywQ0uac= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1290,8 +1268,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod 
h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= -google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= +google.golang.org/grpc v1.79.2 h1:fRMD94s2tITpyJGtBBn7MkMseNpOZU8ZxgC3MMBaXRU= +google.golang.org/grpc v1.79.2/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/docs/experimental-features.md b/docs/experimental-features.md index 2b490e44a49..d025fa89d79 100644 --- a/docs/experimental-features.md +++ b/docs/experimental-features.md @@ -326,11 +326,11 @@ We also support the use of protocol names of the form /x/$NAME/http where $NAME ## FUSE FUSE makes it possible to mount `/ipfs`, `/ipns` and `/mfs` namespaces in your OS, -allowing arbitrary apps access to IPFS using a subset of filesystem abstractions. +allowing arbitrary apps access to IPFS using standard filesystem operations. -It is considered EXPERIMENTAL due to limited (and buggy) support on some platforms. +It is considered EXPERIMENTAL due to limited support on some platforms. -See [fuse.md](./fuse.md) for more details. +See [fuse.md](./fuse.md) for setup instructions and details. 
## Plugins @@ -599,7 +599,7 @@ ipfs config --json Experimental.GatewayOverLibp2p true - [ ] Needs more people to use and report on how well it works - [ ] Needs UX work for exposing non-recursive "HTTP transport" (NoFetch) over both libp2p and plain TCP (and sharing the configuration) - [ ] Needs a mechanism for HTTP handler to signal supported features ([IPIP-425](https://github.com/ipfs/specs/pull/425)) -- [ ] Needs an option for Kubo to detect peers that have it enabled and prefer HTTP transport before falling back to bitswap (and use CAR if peer supports dag-scope=entity from [IPIP-402](https://github.com/ipfs/specs/pull/402)) +- [ ] Needs an option for Kubo to detect peers that have it enabled and prefer HTTP transport before falling back to bitswap (and use CAR if peer supports dag-scope=entity from [IPIP-402](https://specs.ipfs.tech/ipips/ipip-0402/)) ## Accelerated DHT Client diff --git a/docs/fuse.md b/docs/fuse.md index b928b886073..d7f19e98c24 100644 --- a/docs/fuse.md +++ b/docs/fuse.md @@ -1,110 +1,114 @@ # FUSE -**EXPERIMENTAL:** FUSE support is limited, YMMV. +**EXPERIMENTAL:** FUSE support is functional but still evolving. Please report issues at [kubo/issues](https://github.com/ipfs/kubo/issues). Kubo makes it possible to mount `/ipfs`, `/ipns` and `/mfs` namespaces in your OS, -allowing arbitrary apps access to IPFS. +allowing arbitrary apps access to IPFS using standard filesystem operations. + +The underlying FUSE implementation uses [`hanwen/go-fuse`](https://github.com/hanwen/go-fuse). + +- [Install FUSE](#install-fuse) + - [Linux](#linux) + - [macOS](#macos) + - [FreeBSD](#freebsd) +- [Prepare mountpoints](#prepare-mountpoints) +- [Mounting IPFS](#mounting-ipfs) +- [MFS mountpoint](#mfs-mountpoint) +- [Mode and mtime](#mode-and-mtime) +- [Troubleshooting](#troubleshooting) ## Install FUSE -You will need to install and configure fuse before you can mount IPFS +You will need to install and configure FUSE before you can mount IPFS. 
#### Linux -Note: while this guide should work for most distributions, you may need to refer -to your distribution manual to get things working. +Install `fuse3` with your package manager: -Install `fuse` with your favorite package manager: -``` +```sh +# Debian / Ubuntu sudo apt-get install fuse3 + +# Fedora +sudo dnf install fuse3 + +# Arch +sudo pacman -S fuse3 ``` -On some older Linux distributions, you may need to add yourself to the `fuse` group. -(If no such group exists, you can probably skip this step) +On some older Linux distributions, you may need to add yourself to the `fuse` group +for `allow_other` support (if no `fuse` group exists, you can skip this step): + ```sh sudo usermod -a -G fuse ``` -Restart user session, if active, for the change to apply, either by restarting -ssh connection or by re-logging to the system. +Restart your session for the change to apply. -#### Mac OSX -- OSXFUSE +#### macOS -It has been discovered that versions of `osxfuse` prior to `2.7.0` will cause a -kernel panic. For everyone's sake, please upgrade (latest at time of writing is -`2.7.4`). The installer can be found at https://osxfuse.github.io/. There is -also a homebrew formula (`brew cask install osxfuse`) but users report best results -installing from the official OSXFUSE installer package. - -Note that `ipfs` attempts an automatic version check on `osxfuse` to prevent you -from shooting yourself in the foot if you have pre `2.7.0`. Since checking the -OSXFUSE version [is more complicated than it should be], running `ipfs mount` -may require you to install another binary: +Install [macFUSE](https://macfuse.github.io/): ```sh -go get github.com/jbenet/go-fuse-version/fuse-version +brew install --cask macfuse ``` -If you run into any problems installing FUSE or mounting IPFS, hop on IRC and -speak with us, or if you figure something new out, please add to this document! 
+After installation, open **System Settings > Privacy & Security** and allow the macFUSE kernel extension to load. A reboot may be required. + +Kubo automatically sets `volname`, `noapplexattr`, and `noappledouble` mount options on macOS: + +- `volname` shows the filesystem name (ipfs, ipns, mfs) in Finder instead of the generic "macfuse Volume 0" +- `noapplexattr` prevents Finder from probing Apple-private extended attributes on every file access, reducing unnecessary FUSE traffic on network-backed mounts +- `noappledouble` prevents macOS from creating `._` resource fork sidecar files, which would pollute the DAG with macOS-only metadata + +> [!NOTE] +> macOS has known FUSE limitations (frequent STATFS calls, limited notification support) that may affect performance. See the [`hanwen/go-fuse` macOS notes](https://github.com/hanwen/go-fuse#macos-support) for details. #### FreeBSD -```sh -sudo pkg install fusefs-ext2 -``` -Load the fuse kernel module: +Load the FUSE kernel module: + ```sh sudo kldload fusefs ``` To load automatically on boot: + ```sh -sudo echo fusefs_load="YES" >> /boot/loader.conf +echo 'fusefs_load="YES"' | sudo tee -a /boot/loader.conf ``` ## Prepare mountpoints -By default ipfs uses `/ipfs`, `/ipns` and `/mfs` directories for mounting, this can be -changed in config. You will have to create the `/ipfs`, `/ipns` and `/mfs` directories +By default ipfs uses `/ipfs`, `/ipns` and `/mfs` directories for mounting. These can be +changed in config (see [`Mounts`](https://github.com/ipfs/kubo/blob/master/docs/config.md#mounts)). You will have to create the directories explicitly. Note that modifying root requires sudo permissions. 
```sh # make the directories -sudo mkdir /ipfs -sudo mkdir /ipns -sudo mkdir /mfs +sudo mkdir /ipfs /ipns /mfs # chown them so ipfs can use them without root permissions -sudo chown /ipfs -sudo chown /ipns -sudo chown /mfs +sudo chown /ipfs /ipns /mfs ``` -Depending on whether you are using OSX or Linux, follow the proceeding instructions. - -## Make sure IPFS daemon is not running - -You'll need to stop the IPFS daemon if you have it started, otherwise the mount will complain. - -``` -# Check to see if IPFS daemon is running -ps aux | grep ipfs +## Mounting IPFS -# Kill the IPFS daemon -pkill -f ipfs +Make sure no other IPFS daemon is already running, then start the daemon with FUSE mounts enabled: -# Verify that it has been killed +```sh +ipfs daemon --mount ``` -## Mounting IPFS +Or, if the daemon is already running: ```sh -ipfs daemon --mount +ipfs mount ``` If you wish to allow other users to use the mount points, edit `/etc/fuse.conf` -to enable non-root users, i.e.: +to enable non-root users: + ```sh # /etc/fuse.conf - Configuration file for Filesystem in Userspace (FUSE) @@ -117,44 +121,69 @@ user_allow_other ``` Next set `Mounts.FuseAllowOther` config option to `true`: + ```sh ipfs config --json Mounts.FuseAllowOther true ipfs daemon --mount ``` -If using FreeBSD, it is necessary to run `ipfs` as root: +## MFS mountpoint + +The `/mfs` mount exposes the MFS (Mutable File System) root as a FUSE filesystem. +This is the same virtual mutable filesystem as the one behind `ipfs files` commands +(see `ipfs files --help`), enabling manipulation of content-addressed data like regular files. + +Standard tools like `vim`, `rsync`, and `tar` work on writable mounts (`/mfs` and `/ipns`). +Operations like `fsync`, `ftruncate`, `chmod`, `touch`, and rename-over-existing are all supported. 
+ +The CID for any file or directory is retrievable via the `ipfs.cid` +extended attribute: + ```sh -sudo HOME=$HOME ipfs daemon --mount +$ getfattr -n ipfs.cid /mfs/hello.txt +# file: mfs/hello.txt +ipfs.cid="bafkreifjjcie6lypi6ny7amxnfftagclbuxndqonfipmb64f2km2devei4" ``` -## MFS mountpoint +> [!TIP] +> New IPFS nodes should run `ipfs config profile apply unixfs-v1-2025` to use CIDv1 with modern defaults. Without this, files default to CIDv0 (base58 `Qm...` hashes). + +## Mode and mtime -Kubo v0.35.0 and later supports mounting the MFS (Mutable File System) root as -a FUSE filesystem, enabling manipulation of content-addressed data like regular -files. The CID for any file or directory is retrievable via the `ipfs_cid` -extended attribute. +By default, IPFS does not persist POSIX file mode or modification time. Most content on IPFS +does not include this metadata. + +When mode or mtime is absent, FUSE mounts use sensible defaults: + +- Read-only mounts (`/ipfs`): files `0444`, directories `0555` +- Writable mounts (`/ipns`, `/mfs`): files `0644`, directories `0755` + +When UnixFS metadata is present in the DAG (e.g. content added with mode/mtime preservation), +all three mounts show the stored values in `stat` responses regardless of config flags. + +To persist mode and mtime when writing through FUSE, enable the opt-in config flags: ```sh -getfattr -n ipfs_cid /mfs/welcome-to-IPFS.jpg -getfattr: Removing leading '/' from absolute path names -# file: mfs/welcome-to-IPFS.jpg -ipfs_cid="QmaeXDdwpUeKQcMy7d5SFBfVB4y7LtREbhm5KizawPsBSH" +ipfs config --json Mounts.StoreMtime true +ipfs config --json Mounts.StoreMode true ``` -Please note that the operations supported by the MFS FUSE mountpoint are -limited. Since the MFS wasn't designed to store file attributes like ownership -information, permissions and creation date, some applications like `vim` and -`sed` may misbehave due to missing functionality. 
+These flags change the resulting CID even when file content is identical, because mode and mtime +are stored in the UnixFS DAG node metadata. + +See [`Mounts.StoreMtime`](https://github.com/ipfs/kubo/blob/master/docs/config.md#mountsstoremtime) and [`Mounts.StoreMode`](https://github.com/ipfs/kubo/blob/master/docs/config.md#mountsstoremode). ## Troubleshooting #### `Permission denied` or `fusermount: user has no write access to mountpoint` error in Linux Verify that the config file can be read by your user: + ```sh sudo ls -l /etc/fuse.conf -rw-r----- 1 root fuse 216 Jan 2 2013 /etc/fuse.conf ``` + In most distributions, the group named `fuse` will be created during fuse installation. You can check this with: @@ -163,19 +192,18 @@ sudo grep -q fuse /etc/group && echo fuse_group_present || echo fuse_group_missi ``` If the group is present, just add your regular user to the `fuse` group: + ```sh sudo usermod -G fuse -a ``` If the group didn't exist, create `fuse` group (add your regular user to it) and set necessary permissions, for example: + ```sh sudo chgrp fuse /etc/fuse.conf sudo chmod g+r /etc/fuse.conf ``` - Note that the use of `fuse` group is optional and may depend on your operating system. It is okay to use a different group as long as proper permissions are @@ -183,7 +211,7 @@ set for user running `ipfs mount` command. 
#### Mount command crashes and mountpoint gets stuck -``` +```sh sudo umount /ipfs sudo umount /ipns sudo umount /mfs @@ -192,12 +220,19 @@ sudo umount /mfs #### Mounting fails with "error mounting: could not resolve name" Make sure your node's IPNS address has a directory published: -``` -$ mkdir hello/; echo 'hello' > hello/hello.txt; ipfs add -rQ ./hello/ -QmU5PLEGqjetW4RAmXgHpEFL7nVCL3vFnEyrCKUfRk4MSq -$ ipfs name publish QmU5PLEGqjetW4RAmXgHpEFL7nVCL3vFnEyrCKUfRk4MSq +```sh +$ mkdir hello/; echo 'hello world' > hello/hello.txt +$ ipfs add -rQ ./hello/ +bafybeidhkumeonuwkebh2i4fc7o7lguehauradvlk57gzake6ggjsy372a + +$ ipfs name publish bafybeidhkumeonuwkebh2i4fc7o7lguehauradvlk57gzake6ggjsy372a ``` -If you manage to mount on other systems (or followed an alternative path to one -above), please contribute to these docs :D +#### Enabling debug logging + +Set the `IPFS_FUSE_DEBUG` environment variable before starting the daemon to log all FUSE operations to stderr: + +```sh +IPFS_FUSE_DEBUG=1 ipfs daemon --mount +``` diff --git a/fuse/fusetest/detect.go b/fuse/fusetest/detect.go new file mode 100644 index 00000000000..9885eaa6f14 --- /dev/null +++ b/fuse/fusetest/detect.go @@ -0,0 +1,58 @@ +// FUSE availability detection. go-fuse only builds on linux, darwin, and freebsd. +//go:build (linux || darwin || freebsd) && !nofuse + +package fusetest + +import ( + "os" + "os/exec" + "runtime" + "testing" +) + +// fuseFlagFromEnv returns the value of TEST_FUSE if set, or empty string. +// Also checks the legacy TEST_NO_FUSE for backwards compatibility. +func fuseFlagFromEnv() string { + if v := os.Getenv("TEST_FUSE"); v != "" { + return v + } + // Legacy: TEST_NO_FUSE=1 is equivalent to TEST_FUSE=0 + if os.Getenv("TEST_NO_FUSE") == "1" { + return "0" + } + return "" +} + +// fuseAvailable checks whether FUSE is likely to work on this system +// and skips with a helpful message if not. +// +// hanwen/go-fuse supports Linux, macOS, and FreeBSD. 
NetBSD and OpenBSD +// are not supported: NetBSD uses PUFFS (a different protocol) and +// OpenBSD's FUSE support is not compatible with go-fuse's mount mechanism. +func fuseAvailable(t *testing.T) bool { + t.Helper() + + switch runtime.GOOS { + case "linux", "darwin", "freebsd": + default: + t.Skip("FUSE not supported on", runtime.GOOS) + return false + } + + if runtime.GOOS == "linux" { + // go-fuse tries fusermount3 first, then fusermount. + if _, err := exec.LookPath("fusermount"); err == nil { + return true + } + if _, err := exec.LookPath("fusermount3"); err == nil { + return true + } + t.Skip("neither fusermount nor fusermount3 found in PATH") + return false + } + + if _, err := exec.LookPath("umount"); err != nil { + t.Skip("umount not found in PATH") + } + return true +} diff --git a/fuse/fusetest/fusetest.go b/fuse/fusetest/fusetest.go new file mode 100644 index 00000000000..ea0e876fb77 --- /dev/null +++ b/fuse/fusetest/fusetest.go @@ -0,0 +1,100 @@ +//go:build (linux || darwin || freebsd) && !nofuse + +// Package fusetest provides test helpers shared across FUSE test packages. +package fusetest + +import ( + "os" + "syscall" + "testing" + + "github.com/hanwen/go-fuse/v2/fs" + "github.com/stretchr/testify/require" +) + +// SkipUnlessFUSE skips the test when FUSE is not available. +// +// Decision order: +// 1. TEST_FUSE=0 (or legacy TEST_NO_FUSE=1) → skip +// 2. TEST_FUSE=1 → run (CI should set this after installing fuse3) +// 3. Neither set → auto-detect based on platform and fusermount in PATH; +// skip with a helpful message if not found +func SkipUnlessFUSE(t *testing.T) { + t.Helper() + + if v := fuseFlagFromEnv(); v != "" { + if v == "0" { + t.Skip("FUSE tests disabled (TEST_FUSE=0)") + } + return // TEST_FUSE=1, run unconditionally + } + + fuseAvailable(t) // skips with a helpful message if not available +} + +// TestMount mounts root at a temp directory with the given options and +// registers an unmount cleanup. Returns the mount directory path. 
+// Callers set mount-specific options (timeouts, MaxReadAhead, etc.) +// before calling; this helper adds NullPermissions, UID, and GID. +func TestMount(t *testing.T, root fs.InodeEmbedder, opts *fs.Options) string { + t.Helper() + SkipUnlessFUSE(t) + mntDir := t.TempDir() + if opts == nil { + opts = &fs.Options{} + } + opts.NullPermissions = true + opts.UID = uint32(os.Getuid()) + opts.GID = uint32(os.Getgid()) + if opts.MountOptions.FsName == "" { + opts.MountOptions.FsName = "kubo-test" + } + server, err := fs.Mount(mntDir, root, opts) + MountError(t, err) + t.Cleanup(func() { _ = server.Unmount() }) + return mntDir +} + +// AssertStatfsNonZero calls syscall.Statfs on path and verifies the +// result contains real filesystem data (non-zero block counts with +// Bfree <= Blocks). This avoids the racy pattern of comparing two +// Statfs snapshots taken at different times. +func AssertStatfsNonZero(t *testing.T, path string) { + t.Helper() + var st syscall.Statfs_t + require.NoError(t, syscall.Statfs(path, &st)) + require.NotZero(t, st.Blocks, "expected non-zero Blocks for a real filesystem") + require.LessOrEqual(t, st.Bfree, st.Blocks, "Bfree must not exceed Blocks") +} + +// AssertStatBlocks stats path and checks that st_blocks matches the file +// size rounded up to 512-byte units (the POSIX stat convention) and that +// st_blksize matches wantBlksize. These are the fields du, ls -s, and +// stat read to report disk usage per entry. 
+func AssertStatBlocks(t *testing.T, path string, wantBlksize uint32) { + t.Helper() + fi, err := os.Stat(path) + require.NoError(t, err) + st, ok := fi.Sys().(*syscall.Stat_t) + require.True(t, ok, "expected *syscall.Stat_t from os.Stat on FUSE mount") + + wantBlocks := int64((fi.Size() + 511) / 512) + require.Equal(t, wantBlocks, int64(st.Blocks), + "st_blocks mismatch for %s (size=%d)", path, fi.Size()) + require.Equal(t, wantBlksize, uint32(st.Blksize), + "st_blksize mismatch for %s", path) +} + +// MountError handles a FUSE mount error. When TEST_FUSE=1 (CI), a mount +// failure is fatal because the environment is expected to have working FUSE. +// When auto-detecting (no TEST_FUSE set), mount failures cause a skip. +func MountError(t *testing.T, err error) { + t.Helper() + if err == nil { + return + } + if fuseFlagFromEnv() == "1" { + t.Fatal("FUSE mount failed (TEST_FUSE=1, expected FUSE to work):", err) + } + t.Skip("FUSE mount failed:", err) +} diff --git a/fuse/fusetest/writablesuite.go b/fuse/fusetest/writablesuite.go new file mode 100644 index 00000000000..4d7b612f298 --- /dev/null +++ b/fuse/fusetest/writablesuite.go @@ -0,0 +1,914 @@ +// Reusable test suite for writable FUSE mounts. +// +// RunWritableSuite exercises all filesystem operations shared by +// /mfs and /ipns. Each mount provides a MountFunc that creates a +// fresh writable mount. +// +//go:build (linux || darwin || freebsd) && !nofuse + +package fusetest + +import ( + "bytes" + "crypto/rand" + "errors" + "fmt" + "io" + mrand "math/rand" + "os" + "path/filepath" + "strconv" + "sync" + "syscall" + "testing" + "time" + + racedet "github.com/ipfs/go-detect-race" + "github.com/ipfs/kubo/fuse/writable" + "github.com/stretchr/testify/require" + "golang.org/x/sys/unix" +) + +// MountFunc creates a fresh writable FUSE mount and returns the root +// directory path. Cleanup is handled via t.Cleanup. 
+type MountFunc func(t *testing.T, cfg writable.Config) string + +// RunWritableSuite runs generic writable filesystem tests against +// the mount produced by mount. +func RunWritableSuite(t *testing.T, mount MountFunc) { + t.Run("ReadWrite", func(t *testing.T) { + dir := mount(t, writable.Config{}) + data := WriteFileOrFail(t, 500, filepath.Join(dir, "testfile")) + VerifyFile(t, filepath.Join(dir, "testfile"), data) + }) + + t.Run("AppendFile", func(t *testing.T) { + dir := mount(t, writable.Config{}) + path := filepath.Join(dir, "appendme") + + part1 := RandBytes(200) + require.NoError(t, os.WriteFile(path, part1, 0o644)) + + f, err := os.OpenFile(path, os.O_WRONLY|os.O_APPEND, 0o644) + require.NoError(t, err) + part2 := RandBytes(300) + _, err = f.Write(part2) + require.NoError(t, err) + require.NoError(t, f.Close()) + + VerifyFile(t, path, append(part1, part2...)) + }) + + t.Run("MultiWrite", func(t *testing.T) { + dir := mount(t, writable.Config{}) + path := filepath.Join(dir, "multiwrite") + + f, err := os.Create(path) + require.NoError(t, err) + var want []byte + for range 1001 { + b := []byte{byte(mrand.Intn(256))} + _, err := f.Write(b) + require.NoError(t, err) + want = append(want, b...) + } + require.NoError(t, f.Close()) + VerifyFile(t, path, want) + }) + + t.Run("EmptyDirListing", func(t *testing.T) { + dir := mount(t, writable.Config{}) + emptyDir := filepath.Join(dir, "emptydir") + require.NoError(t, os.Mkdir(emptyDir, 0o755)) + + entries, err := os.ReadDir(emptyDir) + require.NoError(t, err) + require.Empty(t, entries) + }) + + t.Run("Mkdir", func(t *testing.T) { + dir := mount(t, writable.Config{}) + nested := filepath.Join(dir, "a", "b", "c") + require.NoError(t, os.MkdirAll(nested, 0o755)) + + info, err := os.Stat(nested) + require.NoError(t, err) + require.True(t, info.IsDir()) + }) + + // Both fstat (on the open handle) and path-based stat must return + // the correct mode and size for a freshly created file. 
The kernel + // caches attrs from the Create response for AttrTimeout: if + // Dir.Create returns an empty EntryOut.Attr, fstat sees the cached + // zero values. A path-based stat does a fresh Lookup, which has its + // own attr-fill path; covering both shapes guards against future + // regressions on either side. + t.Run("CreateAttrsImmediate", func(t *testing.T) { + dir := mount(t, writable.Config{}) + path := filepath.Join(dir, "freshfile") + + f, err := os.Create(path) + require.NoError(t, err) + defer f.Close() + + // fstat on the open handle: exercises the Create response cache. + fstatInfo, err := f.Stat() + require.NoError(t, err) + require.Equal(t, int64(0), fstatInfo.Size()) + require.Equal(t, os.FileMode(0o644), fstatInfo.Mode().Perm(), + "fstat on new file should report default mode, not cached zero") + + // Path-based stat: exercises Dir.Lookup → FileInode.fillAttr. + statInfo, err := os.Stat(path) + require.NoError(t, err) + require.Equal(t, int64(0), statInfo.Size()) + require.Equal(t, os.FileMode(0o644), statInfo.Mode().Perm(), + "stat on new file should report default mode, not cached zero") + }) + + // Same as CreateAttrsImmediate, but for mkdir. Mkdir does not return + // a file handle, so we open the directory afterwards and fstat its + // fd to exercise the inode-level path. Path-based stat exercises + // Lookup. Both must report the directory mode. + t.Run("MkdirAttrsImmediate", func(t *testing.T) { + dir := mount(t, writable.Config{}) + path := filepath.Join(dir, "freshdir") + + require.NoError(t, os.Mkdir(path, 0o755)) + + // Path-based stat: exercises Dir.Lookup → Dir.fillAttr. + statInfo, err := os.Stat(path) + require.NoError(t, err) + require.True(t, statInfo.IsDir()) + require.Equal(t, os.FileMode(0o755), statInfo.Mode().Perm(), + "stat on new directory should report default mode, not cached zero") + + // fstat on an open directory fd: exercises Dir.Getattr. 
+ f, err := os.Open(path) + require.NoError(t, err) + defer f.Close() + fstatInfo, err := f.Stat() + require.NoError(t, err) + require.True(t, fstatInfo.IsDir()) + require.Equal(t, os.FileMode(0o755), fstatInfo.Mode().Perm(), + "fstat on new directory should report default mode, not cached zero") + }) + + t.Run("RenameFile", func(t *testing.T) { + dir := mount(t, writable.Config{}) + src := filepath.Join(dir, "oldname") + dst := filepath.Join(dir, "newname") + + data := WriteFileOrFail(t, 300, src) + require.NoError(t, os.Rename(src, dst)) + + _, err := os.Stat(src) + require.True(t, os.IsNotExist(err)) + VerifyFile(t, dst, data) + }) + + t.Run("CrossDirRename", func(t *testing.T) { + dir := mount(t, writable.Config{}) + require.NoError(t, os.Mkdir(filepath.Join(dir, "src"), 0o755)) + require.NoError(t, os.Mkdir(filepath.Join(dir, "dst"), 0o755)) + + data := WriteFileOrFail(t, 200, filepath.Join(dir, "src", "file")) + require.NoError(t, os.Rename(filepath.Join(dir, "src", "file"), filepath.Join(dir, "dst", "file"))) + + _, err := os.Stat(filepath.Join(dir, "src", "file")) + require.True(t, os.IsNotExist(err)) + VerifyFile(t, filepath.Join(dir, "dst", "file"), data) + }) + + // Renaming a directory (not just a file inside it). The contained + // file must still be readable under the new path. 
+ t.Run("DirRename", func(t *testing.T) { + dir := mount(t, writable.Config{}) + oldDir := filepath.Join(dir, "olddir") + newDir := filepath.Join(dir, "newdir") + + require.NoError(t, os.Mkdir(oldDir, 0o755)) + data := WriteFileOrFail(t, 200, filepath.Join(oldDir, "child")) + + require.NoError(t, os.Rename(oldDir, newDir)) + + _, err := os.Stat(oldDir) + require.True(t, os.IsNotExist(err)) + VerifyFile(t, filepath.Join(newDir, "child"), data) + }) + + t.Run("RemoveFile", func(t *testing.T) { + dir := mount(t, writable.Config{}) + path := filepath.Join(dir, "removeme") + WriteFileOrFail(t, 100, path) + require.NoError(t, os.Remove(path)) + + _, err := os.Stat(path) + require.True(t, os.IsNotExist(err)) + }) + + t.Run("Rmdir", func(t *testing.T) { + dir := mount(t, writable.Config{}) + sub := filepath.Join(dir, "rmdir_target") + require.NoError(t, os.Mkdir(sub, 0o755)) + require.NoError(t, os.Remove(sub)) + + _, err := os.Stat(sub) + require.True(t, os.IsNotExist(err)) + }) + + t.Run("RemoveNonEmptyDirectory", func(t *testing.T) { + dir := mount(t, writable.Config{}) + sub := filepath.Join(dir, "nonempty") + require.NoError(t, os.Mkdir(sub, 0o755)) + WriteFileOrFail(t, 50, filepath.Join(sub, "child")) + + err := syscall.Rmdir(sub) + require.Error(t, err, "expected error removing non-empty directory") + + // After removing the child, rmdir succeeds. 
+ require.NoError(t, os.Remove(filepath.Join(sub, "child"))) + require.NoError(t, os.Remove(sub)) + }) + + t.Run("DoubleEntryFailure", func(t *testing.T) { + dir := mount(t, writable.Config{}) + sub := filepath.Join(dir, "dupdir") + require.NoError(t, os.Mkdir(sub, 0o755)) + require.Error(t, os.Mkdir(sub, 0o755)) + }) + + t.Run("Fsync", func(t *testing.T) { + dir := mount(t, writable.Config{}) + path := filepath.Join(dir, "fsyncme") + + f, err := os.Create(path) + require.NoError(t, err) + _, err = f.Write(RandBytes(500)) + require.NoError(t, err) + require.NoError(t, f.Sync()) + require.NoError(t, f.Close()) + }) + + // After fsync on the writer handle, a fresh reader on a different + // fd must see the synced data. This is the "vim wrote and called + // fsync; my other process should see it immediately" scenario. + t.Run("FsyncCrossHandle", func(t *testing.T) { + dir := mount(t, writable.Config{}) + path := filepath.Join(dir, "fsynccross") + + want := RandBytes(500) + w, err := os.Create(path) + require.NoError(t, err) + _, err = w.Write(want) + require.NoError(t, err) + require.NoError(t, w.Sync()) + // w is intentionally still open: the cross-handle reader must + // see the data after fsync, not just after close. 
+ + got, err := os.ReadFile(path) + require.NoError(t, err) + require.Equal(t, len(want), len(got), + "reader on fresh handle should see all bytes after fsync") + require.Equal(t, want, got, + "reader on a fresh handle should see data flushed by fsync") + + require.NoError(t, w.Close()) + }) + + t.Run("Ftruncate", func(t *testing.T) { + dir := mount(t, writable.Config{}) + path := filepath.Join(dir, "truncme") + + f, err := os.Create(path) + require.NoError(t, err) + _, err = f.Write(RandBytes(1000)) + require.NoError(t, err) + require.NoError(t, f.Truncate(500)) + require.NoError(t, f.Close()) + + info, err := os.Stat(path) + require.NoError(t, err) + require.Equal(t, int64(500), info.Size()) + }) + + // truncate(path, size) without an open fd: uses a temporary + // write descriptor inside Setattr instead of ftruncate on an + // existing handle. + t.Run("TruncatePath", func(t *testing.T) { + dir := mount(t, writable.Config{}) + path := filepath.Join(dir, "pathtrunc") + + WriteFileOrFail(t, 1000, path) + require.NoError(t, syscall.Truncate(path, 500)) + + info, err := os.Stat(path) + require.NoError(t, err) + require.Equal(t, int64(500), info.Size()) + }) + + t.Run("LargeFile", func(t *testing.T) { + dir := mount(t, writable.Config{}) + path := filepath.Join(dir, "largefile") + size := 1024*1024 + 1 // 1 MiB + 1 byte + data := WriteFileOrFail(t, size, path) + VerifyFile(t, path, data) + }) + + t.Run("OpenTrunc", func(t *testing.T) { + dir := mount(t, writable.Config{}) + path := filepath.Join(dir, "truncopen") + + WriteFileOrFail(t, 500, path) + + f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC, 0o644) + require.NoError(t, err) + newData := RandBytes(200) + _, err = f.Write(newData) + require.NoError(t, err) + require.NoError(t, f.Close()) + + VerifyFile(t, path, newData) + }) + + t.Run("TempFileRename", func(t *testing.T) { + dir := mount(t, writable.Config{}) + target := filepath.Join(dir, "target") + tmp := filepath.Join(dir, ".target.tmp") + + 
WriteFileOrFail(t, 100, target)
+		newData := WriteFileOrFail(t, 200, tmp)
+		require.NoError(t, os.Rename(tmp, target))
+
+		VerifyFile(t, target, newData)
+	})
+
+	t.Run("SeekAndWrite", func(t *testing.T) {
+		dir := mount(t, writable.Config{})
+		path := filepath.Join(dir, "seekwrite")
+		data := WriteFileOrFail(t, 100, path)
+
+		f, err := os.OpenFile(path, os.O_WRONLY, 0o644)
+		require.NoError(t, err)
+		patch := []byte("PATCHED")
+		_, err = f.WriteAt(patch, 10)
+		require.NoError(t, err)
+		require.NoError(t, f.Close())
+
+		copy(data[10:], patch)
+		VerifyFile(t, path, data)
+	})
+
+	// Writing past the end of an empty file. UnixFS may not store true
+	// sparse holes, but the visible read must report the requested
+	// offset and the data we wrote, with zero bytes filling the gap.
+	t.Run("SparseWrite", func(t *testing.T) {
+		dir := mount(t, writable.Config{})
+		path := filepath.Join(dir, "sparse")
+
+		f, err := os.Create(path)
+		require.NoError(t, err)
+		payload := RandBytes(100)
+		_, err = f.WriteAt(payload, 1000)
+		require.NoError(t, err)
+		require.NoError(t, f.Close())
+
+		got, err := os.ReadFile(path)
+		require.NoError(t, err)
+		require.Equal(t, 1100, len(got), "size should include the gap before the written bytes")
+		require.True(t, bytes.Equal(payload, got[1000:]), "tail bytes should match the written payload")
+		// Bytes [0:1000] must read back as zero: UnixFS may not store a
+		// true sparse hole, but the gap before the written payload has
+		// to be presented to readers as zero fill, so check every byte.
+		for _, b := range got[:1000] {
+			if b != 0 {
+				t.Fatalf("expected zero gap fill, got byte %d", b)
+			}
+		}
+	})
+
+	// O_EXCL: the second create on the same path must fail with an
+	// error that satisfies os.IsExist. Lock files, ssh-agent, and
+	// atomic file creation patterns rely on this. 
+ t.Run("OExcl", func(t *testing.T) { + dir := mount(t, writable.Config{}) + path := filepath.Join(dir, "exclfile") + + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o644) + require.NoError(t, err) + require.NoError(t, f.Close()) + + _, err = os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o644) + require.Error(t, err) + require.True(t, os.IsExist(err), "second O_EXCL create should fail with EEXIST, got %v", err) + }) + + t.Run("OverwriteExisting", func(t *testing.T) { + dir := mount(t, writable.Config{}) + path := filepath.Join(dir, "overwrite") + + WriteFileOrFail(t, 500, path) + + f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC, 0o644) + require.NoError(t, err) + newData := RandBytes(300) + _, err = f.Write(newData) + require.NoError(t, err) + require.NoError(t, f.Close()) + + VerifyFile(t, path, newData) + }) + + // Vim (with backupcopy=yes) save sequence: open O_TRUNC, write, fsync, chmod. + t.Run("VimSavePattern", func(t *testing.T) { + dir := mount(t, writable.Config{StoreMode: true}) + path := filepath.Join(dir, "vimsave") + + WriteFileOrFail(t, 200, path) + + f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC, 0o644) + require.NoError(t, err) + newData := RandBytes(300) + _, err = f.Write(newData) + require.NoError(t, err) + require.NoError(t, f.Sync()) + require.NoError(t, f.Chmod(0o644)) + require.NoError(t, f.Close()) + + VerifyFile(t, path, newData) + }) + + // rsync default save: create temp file, write, rename over target. 
+ t.Run("RsyncPattern", func(t *testing.T) { + dir := mount(t, writable.Config{}) + target := filepath.Join(dir, "rsync_target") + tmp := filepath.Join(dir, ".rsync_target.XXXXXX") + + WriteFileOrFail(t, 100, target) + newData := WriteFileOrFail(t, 200, tmp) + require.NoError(t, os.Rename(tmp, target)) + + VerifyFile(t, target, newData) + }) + + t.Run("Symlink", func(t *testing.T) { + dir := mount(t, writable.Config{}) + link := filepath.Join(dir, "mylink") + require.NoError(t, os.Symlink("/some/target", link)) + + got, err := os.Readlink(link) + require.NoError(t, err) + require.Equal(t, "/some/target", got) + }) + + // Verify that readdir reports symlinks with ModeSymlink so that + // tools like ls -l and find -type l see the correct file type. + t.Run("SymlinkReaddir", func(t *testing.T) { + dir := mount(t, writable.Config{}) + + // Create a regular file and a symlink in the same directory. + WriteFileOrFail(t, 100, filepath.Join(dir, "regular")) + require.NoError(t, os.Symlink("/some/target", filepath.Join(dir, "mylink"))) + + entries, err := os.ReadDir(dir) + require.NoError(t, err) + + found := false + for _, e := range entries { + if e.Name() == "mylink" { + require.Equal(t, os.ModeSymlink, e.Type()&os.ModeSymlink, + "readdir should report symlink type for mylink") + found = true + } + if e.Name() == "regular" { + require.Equal(t, os.FileMode(0), e.Type()&os.ModeSymlink, + "readdir should not report symlink type for regular file") + } + } + require.True(t, found, "symlink entry not found in readdir") + }) + + t.Run("SymlinkSetattr", func(t *testing.T) { + dir := mount(t, writable.Config{StoreMtime: true}) + link := filepath.Join(dir, "mtimelink") + require.NoError(t, os.Symlink("/some/target", link)) + + mtime := time.Date(2025, 6, 15, 12, 0, 0, 0, time.UTC) + require.NoError(t, Lchtimes(link, mtime)) + + var stat unix.Stat_t + require.NoError(t, unix.Lstat(link, &stat)) + gotMtime := time.Unix(stat.Mtim.Sec, stat.Mtim.Nsec) + require.WithinDuration(t, 
mtime, gotMtime, time.Second) + }) + + t.Run("FileSizeReporting", func(t *testing.T) { + dir := mount(t, writable.Config{}) + path := filepath.Join(dir, "sizecheck") + data := WriteFileOrFail(t, 5555, path) + + info, err := os.Stat(path) + require.NoError(t, err) + require.Equal(t, int64(len(data)), info.Size()) + }) + + t.Run("FileAttributes", func(t *testing.T) { + dir := mount(t, writable.Config{}) + path := filepath.Join(dir, "attrcheck") + WriteFileOrFail(t, 100, path) + + info, err := os.Stat(path) + require.NoError(t, err) + require.False(t, info.IsDir()) + require.Equal(t, "attrcheck", info.Name()) + require.Equal(t, int64(100), info.Size()) + }) + + t.Run("DefaultDirMode", func(t *testing.T) { + dir := mount(t, writable.Config{}) + sub := filepath.Join(dir, "modedir") + require.NoError(t, os.Mkdir(sub, 0o755)) + + info, err := os.Stat(sub) + require.NoError(t, err) + require.Equal(t, os.FileMode(0o755), info.Mode().Perm()) + }) + + // StoreMtime tests. + t.Run("StoreMtime/disabled", func(t *testing.T) { + dir := mount(t, writable.Config{StoreMtime: false}) + path := filepath.Join(dir, "nomtime") + WriteFileOrFail(t, 100, path) + + // Without StoreMtime, Getattr returns mtime=0 which the + // kernel reports as Unix epoch start. + info, err := os.Stat(path) + require.NoError(t, err) + require.Equal(t, time.Unix(0, 0), info.ModTime()) + }) + + t.Run("StoreMtime/enabled", func(t *testing.T) { + dir := mount(t, writable.Config{StoreMtime: true}) + path := filepath.Join(dir, "withmtime") + WriteFileOrFail(t, 100, path) + + info, err := os.Stat(path) + require.NoError(t, err) + require.False(t, info.ModTime().IsZero(), "mtime should be set when StoreMtime is on") + require.WithinDuration(t, time.Now(), info.ModTime(), 30*time.Second) + }) + + // StoreMode tests. 
+ t.Run("StoreMode/disabled", func(t *testing.T) { + dir := mount(t, writable.Config{StoreMode: false}) + path := filepath.Join(dir, "nomode") + WriteFileOrFail(t, 100, path) + // chmod should not fail, even when not persisting + require.NoError(t, os.Chmod(path, 0o600)) + + info, err := os.Stat(path) + require.NoError(t, err) + // With StoreMode off, mode stays at default 0644. + require.Equal(t, os.FileMode(0o644), info.Mode().Perm()) + }) + + t.Run("StoreMode/enabled", func(t *testing.T) { + dir := mount(t, writable.Config{StoreMode: true}) + path := filepath.Join(dir, "withmode") + WriteFileOrFail(t, 100, path) + require.NoError(t, os.Chmod(path, 0o600)) + + info, err := os.Stat(path) + require.NoError(t, err) + require.Equal(t, os.FileMode(0o600), info.Mode().Perm()) + }) + + t.Run("SetuidBitsStripped", func(t *testing.T) { + dir := mount(t, writable.Config{StoreMode: true}) + path := filepath.Join(dir, "setuid") + WriteFileOrFail(t, 100, path) + + // Setuid, setgid, and sticky bits should be silently stripped + // because boxo's MFS exposes only the lower 9 permission bits. 
+		require.NoError(t, os.Chmod(path, 0o755|os.ModeSetuid))
+		info, err := os.Stat(path)
+		require.NoError(t, err)
+		require.Equal(t, os.FileMode(0o755), info.Mode().Perm())
+	})
+
+	t.Run("DirMtime", func(t *testing.T) {
+		dir := mount(t, writable.Config{StoreMtime: true})
+		sub := filepath.Join(dir, "dirmtime")
+		require.NoError(t, os.Mkdir(sub, 0o755))
+
+		mtime := time.Date(2025, 6, 15, 12, 0, 0, 0, time.UTC)
+		require.NoError(t, os.Chtimes(sub, mtime, mtime))
+
+		info, err := os.Stat(sub)
+		require.NoError(t, err)
+		require.WithinDuration(t, mtime, info.ModTime(), time.Second)
+	})
+
+	t.Run("DirChmod", func(t *testing.T) {
+		dir := mount(t, writable.Config{StoreMode: true})
+		sub := filepath.Join(dir, "dirchmod")
+		require.NoError(t, os.Mkdir(sub, 0o755))
+		require.NoError(t, os.Chmod(sub, 0o700))
+
+		info, err := os.Stat(sub)
+		require.NoError(t, err)
+		require.Equal(t, os.FileMode(0o700), info.Mode().Perm())
+	})
+
+	t.Run("XattrCID", func(t *testing.T) {
+		dir := mount(t, writable.Config{})
+		path := filepath.Join(dir, "xattrfile")
+		WriteFileOrFail(t, 100, path)
+
+		buf := make([]byte, 256)
+		n, err := unix.Getxattr(path, "ipfs.cid", buf)
+		require.NoError(t, err)
+		require.NotEmpty(t, string(buf[:n]))
+	})
+
+	t.Run("UnknownXattr", func(t *testing.T) {
+		dir := mount(t, writable.Config{})
+		path := filepath.Join(dir, "xattrunk")
+		WriteFileOrFail(t, 50, path)
+
+		buf := make([]byte, 256)
+		_, err := unix.Getxattr(path, "user.nonexistent", buf)
+		require.Error(t, err)
+	})
+
+	t.Run("ConcurrentWrites", func(t *testing.T) {
+		dir := mount(t, writable.Config{})
+		nactors := 4
+		filesPerActor := 400
+		fileSize := 2000
+
+		if racedet.WithRace() {
+			nactors = 2
+			filesPerActor = 50
+		}
+
+		data := make([][][]byte, nactors)
+		var wg sync.WaitGroup
+		for i := range nactors {
+			data[i] = make([][]byte, filesPerActor)
+			wg.Add(1)
+			go func(n int) {
+				defer wg.Done()
+				for j := range filesPerActor {
+					out, err := WriteFile(fileSize, filepath.Join(dir, fmt.Sprintf("%dFILE%d", n, 
j))) + if err != nil { + t.Error(err) + continue + } + data[n][j] = out + } + }(i) + } + wg.Wait() + + for i := range nactors { + for j := range filesPerActor { + if data[i][j] == nil { + continue + } + VerifyFile(t, filepath.Join(dir, fmt.Sprintf("%dFILE%d", i, j)), data[i][j]) + } + } + }) + + t.Run("ConcurrentRW", func(t *testing.T) { + dir := mount(t, writable.Config{}) + nfiles := 5 + readers := 5 + + content := make([][]byte, nfiles) + for i := range content { + content[i] = RandBytes(8196) + } + + // Write phase. + var wg sync.WaitGroup + for i := range nfiles { + wg.Go(func() { + if err := os.WriteFile(filepath.Join(dir, strconv.Itoa(i)), content[i], 0o644); err != nil { + t.Error(err) + } + }) + } + wg.Wait() + + // Read phase. + for i := range nfiles * readers { + wg.Go(func() { + got, err := os.ReadFile(filepath.Join(dir, strconv.Itoa(i/readers))) + if err != nil { + t.Error(err) + return + } + if !bytes.Equal(content[i/readers], got) { + t.Error("read and write not equal") + } + }) + } + wg.Wait() + }) + + // Large file concurrent reads: the kernel sends multiple Read + // requests via readahead on files bigger than max_read (128 KB). + // Without proper mutex serialization on the file handle, concurrent + // reads corrupt the DagReader's internal state. + t.Run("LargeFileConcurrentRead", func(t *testing.T) { + dir := mount(t, writable.Config{}) + path := filepath.Join(dir, "largeconcurrent") + + size := 1024*1024 + 1 // 1 MiB + 1 byte + data := WriteFileOrFail(t, size, path) + + var wg sync.WaitGroup + for range 8 { + wg.Go(func() { + got, err := os.ReadFile(path) + if err != nil { + t.Errorf("ReadFile: %v", err) + return + } + if !bytes.Equal(got, data) { + t.Errorf("data mismatch: got %d bytes, want %d", len(got), len(data)) + } + }) + } + wg.Wait() + }) + + // Simulate the rsync --inplace pattern: one goroutine holds a + // file open for reading while another opens it for writing. 
+ // MFS's desclock blocks a write-open while a read descriptor + // exists. The FUSE layer avoids this by creating a DagReader + // for read-only opens instead of going through MFS. + t.Run("ConcurrentReadWrite", func(t *testing.T) { + dir := mount(t, writable.Config{}) + path := filepath.Join(dir, "concurrent_rw") + + data := WriteFileOrFail(t, 50000, path) + + // Hold the file open for reading (like rsync's generator). + reader, err := os.Open(path) + require.NoError(t, err) + defer reader.Close() + + // Overwrite the file while the reader is still open + // (like rsync's receiver). + newData := RandBytes(60000) + require.NoError(t, os.WriteFile(path, newData, 0o644)) + + // The reader should still see the original snapshot. + got, err := io.ReadAll(reader) + require.NoError(t, err) + require.True(t, bytes.Equal(data, got), "reader should see original data") + + // A fresh read should see the new data. + got2, err := os.ReadFile(path) + require.NoError(t, err) + require.True(t, bytes.Equal(newData, got2), "new reader should see updated data") + }) + + t.Run("FSThrash", func(t *testing.T) { + dir := mount(t, writable.Config{}) + dirs := []string{dir} + dirlock := sync.RWMutex{} + filelock := sync.Mutex{} + files := make(map[string][]byte) + + ndirWorkers := 2 + nfileWorkers := 2 + ndirs := 100 + nfiles := 200 + + var wg sync.WaitGroup + + for i := range ndirWorkers { + wg.Add(1) + go func(worker int) { + defer wg.Done() + for j := range ndirs { + dirlock.RLock() + n := mrand.Intn(len(dirs)) + d := dirs[n] + dirlock.RUnlock() + + newDir := fmt.Sprintf("%s/dir%d-%d", d, worker, j) + if err := os.Mkdir(newDir, os.ModeDir); err != nil { + t.Error(err) + continue + } + dirlock.Lock() + dirs = append(dirs, newDir) + dirlock.Unlock() + } + }(i) + } + + for i := range nfileWorkers { + wg.Add(1) + go func(worker int) { + defer wg.Done() + for j := range nfiles { + dirlock.RLock() + n := mrand.Intn(len(dirs)) + d := dirs[n] + dirlock.RUnlock() + + name := 
fmt.Sprintf("%s/file%d-%d", d, worker, j) + data, err := WriteFile(2000+mrand.Intn(5000), name) + if err != nil { + t.Error(err) + continue + } + filelock.Lock() + files[name] = data + filelock.Unlock() + } + }(i) + } + + wg.Wait() + for name, data := range files { + got, err := os.ReadFile(name) + if err != nil { + t.Errorf("reading %s: %v", name, err) + continue + } + if !bytes.Equal(data, got) { + t.Errorf("data mismatch in %s", name) + } + } + }) +} + +// Test helpers exported for use by mount-specific tests. + +// RandBytes returns size random bytes. +func RandBytes(size int) []byte { + b := make([]byte, size) + if _, err := io.ReadFull(rand.Reader, b); err != nil { + panic(err) + } + return b +} + +// WriteFile writes size random bytes to path and returns the data. +func WriteFile(size int, path string) ([]byte, error) { + data := RandBytes(size) + f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o666) + if err != nil { + return nil, err + } + _, err = f.Write(data) + if err != nil { + f.Close() + return nil, err + } + // Go's goroutine preemption (SIGURG) can interrupt the FUSE FLUSH + // inside close(), returning EINTR. This is not data loss: the write + // already succeeded and the kernel will still send RELEASE. + if err := f.Close(); err != nil && !errors.Is(err, syscall.EINTR) { + return nil, err + } + return data, nil +} + +// WriteFileOrFail calls WriteFile and fails the test on error. +func WriteFileOrFail(t *testing.T, size int, path string) []byte { + t.Helper() + data, err := WriteFile(size, path) + require.NoError(t, err) + return data +} + +// VerifyFile reads the file at path and asserts its contents match want. +func VerifyFile(t *testing.T, path string, want []byte) { + t.Helper() + got, err := os.ReadFile(path) + require.NoError(t, err) + require.Equal(t, len(want), len(got), "file size mismatch") + require.True(t, bytes.Equal(want, got), "file content mismatch") +} + +// CheckExists asserts that path exists. 
+func CheckExists(t *testing.T, path string) { + t.Helper() + _, err := os.Stat(path) + require.NoError(t, err) +} + +// Lchtimes sets mtime on a symlink without following it (lutimes). +// Go's os package has no Lchtimes, so we call utimensat directly. +func Lchtimes(path string, mtime time.Time) error { + ts := unix.NsecToTimespec(mtime.UnixNano()) + return unix.UtimesNanoAt(unix.AT_FDCWD, path, []unix.Timespec{ts, ts}, unix.AT_SYMLINK_NOFOLLOW) +} diff --git a/fuse/ipns/ipns_test.go b/fuse/ipns/ipns_test.go index 69cf22a2e89..92fe8e5571b 100644 --- a/fuse/ipns/ipns_test.go +++ b/fuse/ipns/ipns_test.go @@ -1,500 +1,188 @@ -//go:build !nofuse && !openbsd && !netbsd && !plan9 +//go:build (linux || darwin || freebsd) && !nofuse + +// Unit tests for the /ipns FUSE mount. +// Generic writable operations are exercised by the shared suite in +// fusetest.RunWritableSuite. This file contains the mount factory +// and IPNS-specific tests only. package ipns import ( "bytes" "context" - "fmt" - "io" - mrand "math/rand" "os" - "sync" "testing" - "bazil.org/fuse" + "github.com/hanwen/go-fuse/v2/fs" + "github.com/hanwen/go-fuse/v2/fuse" + "github.com/stretchr/testify/require" - core "github.com/ipfs/kubo/core" + "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/core" coreapi "github.com/ipfs/kubo/core/coreapi" - - fstest "bazil.org/fuse/fs/fstestutil" - racedet "github.com/ipfs/go-detect-race" - "github.com/ipfs/go-test/random" - ci "github.com/libp2p/go-libp2p-testing/ci" + iface "github.com/ipfs/kubo/core/coreiface" + "github.com/ipfs/kubo/fuse/fusetest" + fusemnt "github.com/ipfs/kubo/fuse/mount" + "github.com/ipfs/kubo/fuse/writable" ) -func maybeSkipFuseTests(t *testing.T) { - if ci.NoFuse() { - t.Skip("Skipping FUSE tests") - } -} - -func randBytes(size int) []byte { - b := make([]byte, size) - _, err := io.ReadFull(random.NewRand(), b) - if err != nil { - panic(err) - } - return b -} - -func mkdir(t *testing.T, path string) { - err := os.Mkdir(path, os.ModeDir) - if 
err != nil { - t.Fatal(err) - } -} - -func writeFileOrFail(t *testing.T, size int, path string) []byte { - data, err := writeFile(size, path) - if err != nil { - t.Fatal(err) - } - return data -} - -func writeFile(size int, path string) ([]byte, error) { - data := randBytes(size) - err := os.WriteFile(path, data, 0o666) - return data, err -} - -func verifyFile(t *testing.T, path string, wantData []byte) { - isData, err := os.ReadFile(path) - if err != nil { - t.Fatal(err) - } - if len(isData) != len(wantData) { - t.Fatal("Data not equal - length check failed") - } - if !bytes.Equal(isData, wantData) { - t.Fatal("Data not equal") - } +type mountWrap struct { + Dir string + Root *Root + server *fuse.Server + closed bool } -func checkExists(t *testing.T, path string) { - _, err := os.Stat(path) - if err != nil { - t.Fatal(err) +func (m *mountWrap) Close() { + if m.closed { + return } -} - -func closeMount(mnt *mountWrap) { - if err := recover(); err != nil { - log.Error("Recovered panic") - log.Error(err) + m.closed = true + if m.server != nil { + _ = m.server.Unmount() } - mnt.Close() + _ = m.Root.Close() } -type mountWrap struct { - *fstest.Mount - Fs *FileSystem -} +// fakeMount is a minimal mount.Mount that reports itself as active. +// This simulates the real daemon path where node.Mounts.Ipns is set +// after the FUSE filesystem is mounted, ensuring that checkPublishAllowed +// is actually exercised during tests (see issue #2168). 
+type fakeMount struct{} -func (m *mountWrap) Close() error { - m.Fs.Destroy() - m.Mount.Close() - return nil -} +func (fakeMount) MountPoint() string { return "/fake/ipns" } +func (fakeMount) Unmount() error { return nil } +func (fakeMount) IsActive() bool { return true } -func setupIpnsTest(t *testing.T, node *core.IpfsNode) (*core.IpfsNode, *mountWrap) { +func setupIpnsTest(t *testing.T, nd *core.IpfsNode, cfgs ...config.Mounts) (*core.IpfsNode, *mountWrap) { t.Helper() - maybeSkipFuseTests(t) - - var err error - if node == nil { - node, err = core.NewNode(context.Background(), &core.BuildCfg{}) - if err != nil { - t.Fatal(err) - } - - err = InitializeKeyspace(node, node.PrivateKey) - if err != nil { - t.Fatal(err) - } - } + fusetest.SkipUnlessFUSE(t) - coreAPI, err := coreapi.NewCoreAPI(node) - if err != nil { - t.Fatal(err) - } - - fs, err := NewFileSystem(node.Context(), coreAPI, "", "") - if err != nil { - t.Fatal(err) - } - mnt, err := fstest.MountedT(t, fs, nil) - if err == fuse.ErrOSXFUSENotFound { - t.Skip(err) - } - if err != nil { - t.Fatalf("error mounting at temporary directory: %v", err) + var cfg config.Mounts + if len(cfgs) > 0 { + cfg = cfgs[0] } - return node, &mountWrap{ - Mount: mnt, - Fs: fs, - } -} - -func TestIpnsLocalLink(t *testing.T) { - nd, mnt := setupIpnsTest(t, nil) - defer mnt.Close() - name := mnt.Dir + "/local" - - checkExists(t, name) - - linksto, err := os.Readlink(name) - if err != nil { - t.Fatal(err) - } + var err error + if nd == nil { + nd, err = core.NewNode(context.Background(), &core.BuildCfg{}) + require.NoError(t, err) - if linksto != nd.Identity.String() { - t.Fatal("Link invalid") + err = InitializeKeyspace(nd, nd.PrivateKey) + require.NoError(t, err) } -} -// Test writing a file and reading it back. 
-func TestIpnsBasicIO(t *testing.T) { - if testing.Short() { - t.SkipNow() - } - nd, mnt := setupIpnsTest(t, nil) - defer closeMount(mnt) + coreAPI, err := coreapi.NewCoreAPI(nd) + require.NoError(t, err) - fname := mnt.Dir + "/local/testfile" - data := writeFileOrFail(t, 10, fname) + key, err := coreAPI.Key().Self(nd.Context()) + require.NoError(t, err) - rbuf, err := os.ReadFile(fname) - if err != nil { - t.Fatal(err) - } + root, err := CreateRoot(nd.Context(), coreAPI, map[string]iface.Key{"local": key}, "", "", nd.Repo.Path(), cfg, config.Import{}) + require.NoError(t, err) - if !bytes.Equal(rbuf, data) { - t.Fatal("Incorrect Read!") - } + mntDir := t.TempDir() + server, err := fs.Mount(mntDir, root, &fs.Options{ + NullPermissions: true, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + EntryTimeout: &mutableCacheTime, + AttrTimeout: &mutableCacheTime, + MountOptions: fuse.MountOptions{ + FsName: "kubo-test", + MaxReadAhead: fusemnt.MaxReadAhead, + ExtraCapabilities: fusemnt.WritableMountCapabilities, + }, + }) + fusetest.MountError(t, err) - fname2 := mnt.Dir + "/" + nd.Identity.String() + "/testfile" - rbuf, err = os.ReadFile(fname2) - if err != nil { - t.Fatal(err) - } + mnt := &mountWrap{Dir: mntDir, Root: root, server: server} + t.Cleanup(mnt.Close) - if !bytes.Equal(rbuf, data) { - t.Fatal("Incorrect Read!") - } + nd.Mounts.Ipns = fakeMount{} + return nd, mnt } -// Test to make sure file changes persist over mounts of ipns. -func TestFilePersistence(t *testing.T) { - if testing.Short() { - t.SkipNow() - } - node, mnt := setupIpnsTest(t, nil) - - fname := "/local/atestfile" - data := writeFileOrFail(t, 127, mnt.Dir+fname) - - mnt.Close() - - t.Log("Closed, opening new fs") - _, mnt = setupIpnsTest(t, node) - defer mnt.Close() - - rbuf, err := os.ReadFile(mnt.Dir + fname) - if err != nil { - t.Fatal(err) +// newIpnsMount is the factory for the shared writable suite. It creates +// an IPNS mount and returns the writable /local directory path. 
+func newIpnsMount(t *testing.T, cfg writable.Config) string { + t.Helper() + mountsCfg := config.Mounts{} + if cfg.StoreMtime { + mountsCfg.StoreMtime = config.True } - - if !bytes.Equal(rbuf, data) { - t.Fatalf("File data changed between mounts! sizes differ: %d != %d", len(data), len(rbuf)) + if cfg.StoreMode { + mountsCfg.StoreMode = config.True } + _, mnt := setupIpnsTest(t, nil, mountsCfg) + return mnt.Dir + "/local" } -func TestMultipleDirs(t *testing.T) { - node, mnt := setupIpnsTest(t, nil) - - t.Log("make a top level dir") - dir1 := "/local/test1" - mkdir(t, mnt.Dir+dir1) - - checkExists(t, mnt.Dir+dir1) - - t.Log("write a file in it") - data1 := writeFileOrFail(t, 4000, mnt.Dir+dir1+"/file1") - - verifyFile(t, mnt.Dir+dir1+"/file1", data1) - - t.Log("sub directory") - mkdir(t, mnt.Dir+dir1+"/dir2") - - checkExists(t, mnt.Dir+dir1+"/dir2") - - t.Log("file in that subdirectory") - data2 := writeFileOrFail(t, 5000, mnt.Dir+dir1+"/dir2/file2") - - verifyFile(t, mnt.Dir+dir1+"/dir2/file2", data2) - - mnt.Close() - t.Log("closing mount, then restarting") - - _, mnt = setupIpnsTest(t, node) - - checkExists(t, mnt.Dir+dir1) - - verifyFile(t, mnt.Dir+dir1+"/file1", data1) - - verifyFile(t, mnt.Dir+dir1+"/dir2/file2", data2) - mnt.Close() -} - -// Test to make sure the filesystem reports file sizes correctly. -func TestFileSizeReporting(t *testing.T) { - if testing.Short() { - t.SkipNow() - } - _, mnt := setupIpnsTest(t, nil) - defer mnt.Close() - - fname := mnt.Dir + "/local/sizecheck" - data := writeFileOrFail(t, 5555, fname) - - finfo, err := os.Stat(fname) - if err != nil { - t.Fatal(err) - } - - if finfo.Size() != int64(len(data)) { - t.Fatal("Read incorrect size from stat!") - } +func TestWritableSuite(t *testing.T) { + fusetest.RunWritableSuite(t, newIpnsMount) } -// Test to make sure you can't create multiple entries with the same name. 
-func TestDoubleEntryFailure(t *testing.T) { - if testing.Short() { - t.SkipNow() - } - _, mnt := setupIpnsTest(t, nil) - defer mnt.Close() - - dname := mnt.Dir + "/local/thisisadir" - err := os.Mkdir(dname, 0o777) - if err != nil { - t.Fatal(err) - } +// TestIpnsLocalLink verifies that /ipns/local is a symlink to the +// node's own peer ID directory. +func TestIpnsLocalLink(t *testing.T) { + nd, mnt := setupIpnsTest(t, nil) - err = os.Mkdir(dname, 0o777) - if err == nil { - t.Fatal("Should have gotten error one creating new directory.") - } + target, err := os.Readlink(mnt.Dir + "/local") + require.NoError(t, err) + require.Equal(t, nd.Identity.String(), target) } -func TestAppendFile(t *testing.T) { - if testing.Short() { - t.SkipNow() - } +// TestNamespaceRootMode verifies that the /ipns root has execute-only +// mode (not listable, only traversable). +func TestNamespaceRootMode(t *testing.T) { _, mnt := setupIpnsTest(t, nil) - defer mnt.Close() - - fname := mnt.Dir + "/local/file" - data := writeFileOrFail(t, 1300, fname) - - fi, err := os.OpenFile(fname, os.O_RDWR|os.O_APPEND, 0o666) - if err != nil { - t.Fatal(err) - } - - nudata := randBytes(500) - - n, err := fi.Write(nudata) - if err != nil { - t.Fatal(err) - } - err = fi.Close() - if err != nil { - t.Fatal(err) - } - if n != len(nudata) { - t.Fatal("Failed to write enough bytes.") - } - - data = append(data, nudata...) - - rbuf, err := os.ReadFile(fname) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(rbuf, data) { - t.Fatal("Data inconsistent!") - } + info, err := os.Stat(mnt.Dir) + require.NoError(t, err) + require.Equal(t, os.FileMode(0o111), info.Mode().Perm()) } -func TestConcurrentWrites(t *testing.T) { - if testing.Short() { - t.SkipNow() - } - _, mnt := setupIpnsTest(t, nil) - defer mnt.Close() - - nactors := 4 - filesPerActor := 400 - fileSize := 2000 - - data := make([][][]byte, nactors) +// TestFilePersistence verifies that file data survives unmount and remount. 
+func TestFilePersistence(t *testing.T) { + nd, mnt := setupIpnsTest(t, nil) - if racedet.WithRace() { - nactors = 2 - filesPerActor = 50 - } + data := fusetest.RandBytes(4000) + require.NoError(t, os.WriteFile(mnt.Dir+"/local/persist", data, 0o644)) + mnt.Close() - wg := sync.WaitGroup{} - for i := 0; i < nactors; i++ { - data[i] = make([][]byte, filesPerActor) - wg.Add(1) - go func(n int) { - defer wg.Done() - for j := 0; j < filesPerActor; j++ { - out, err := writeFile(fileSize, mnt.Dir+fmt.Sprintf("/local/%dFILE%d", n, j)) - if err != nil { - t.Error(err) - continue - } - data[n][j] = out - } - }(i) - } - wg.Wait() - - for i := 0; i < nactors; i++ { - for j := 0; j < filesPerActor; j++ { - if data[i][j] == nil { - // Error already reported. - continue - } - verifyFile(t, mnt.Dir+fmt.Sprintf("/local/%dFILE%d", i, j), data[i][j]) - } - } + _, mnt = setupIpnsTest(t, nd) + got, err := os.ReadFile(mnt.Dir + "/local/persist") + require.NoError(t, err) + require.True(t, bytes.Equal(data, got)) } -func TestFSThrash(t *testing.T) { - files := make(map[string][]byte) - - if testing.Short() { - t.SkipNow() - } - _, mnt := setupIpnsTest(t, nil) - defer mnt.Close() - - base := mnt.Dir + "/local" - dirs := []string{base} - dirlock := sync.RWMutex{} - filelock := sync.Mutex{} - - ndirWorkers := 2 - nfileWorkers := 2 - - ndirs := 100 - nfiles := 200 - - wg := sync.WaitGroup{} - - // Spawn off workers to make directories - for i := range ndirWorkers { - wg.Add(1) - go func(worker int) { - defer wg.Done() - for j := range ndirs { - dirlock.RLock() - n := mrand.Intn(len(dirs)) - dir := dirs[n] - dirlock.RUnlock() - - newDir := fmt.Sprintf("%s/dir%d-%d", dir, worker, j) - err := os.Mkdir(newDir, os.ModeDir) - if err != nil { - t.Error(err) - continue - } - dirlock.Lock() - dirs = append(dirs, newDir) - dirlock.Unlock() - } - }(i) - } +// TestMultipleDirs verifies nested directories persist across remount. 
+func TestMultipleDirs(t *testing.T) { + nd, mnt := setupIpnsTest(t, nil) - // Spawn off workers to make files - for i := range nfileWorkers { - wg.Add(1) - go func(worker int) { - defer wg.Done() - for j := range nfiles { - dirlock.RLock() - n := mrand.Intn(len(dirs)) - dir := dirs[n] - dirlock.RUnlock() - - newFileName := fmt.Sprintf("%s/file%d-%d", dir, worker, j) - - data, err := writeFile(2000+mrand.Intn(5000), newFileName) - if err != nil { - t.Error(err) - continue - } - filelock.Lock() - files[newFileName] = data - filelock.Unlock() - } - }(i) - } + require.NoError(t, os.Mkdir(mnt.Dir+"/local/test1", 0o755)) + data1 := fusetest.WriteFileOrFail(t, 4000, mnt.Dir+"/local/test1/file1") + require.NoError(t, os.Mkdir(mnt.Dir+"/local/test1/dir2", 0o755)) + data2 := fusetest.WriteFileOrFail(t, 5000, mnt.Dir+"/local/test1/dir2/file2") - wg.Wait() - for name, data := range files { - out, err := os.ReadFile(name) - if err != nil { - t.Error(err) - } + mnt.Close() + _, mnt = setupIpnsTest(t, nd) - if !bytes.Equal(data, out) { - t.Errorf("Data didn't match in %s: expected %v, got %v", name, data, out) - } - } + fusetest.CheckExists(t, mnt.Dir+"/local/test1") + fusetest.VerifyFile(t, mnt.Dir+"/local/test1/file1", data1) + fusetest.VerifyFile(t, mnt.Dir+"/local/test1/dir2/file2", data2) } -// Test writing a medium sized file one byte at a time. -func TestMultiWrite(t *testing.T) { - if testing.Short() { - t.SkipNow() - } - +// TestStatfs verifies that statfs on the /ipns mount reports the disk +// space of the repo's backing filesystem. macOS Finder refuses to copy +// files onto a volume that reports zero free space. 
+func TestStatfs(t *testing.T) { _, mnt := setupIpnsTest(t, nil) - defer mnt.Close() - - fpath := mnt.Dir + "/local/file" - fi, err := os.Create(fpath) - if err != nil { - t.Fatal(err) - } - data := randBytes(1001) - for i := range data { - n, err := fi.Write(data[i : i+1]) - if err != nil { - t.Fatal(err) - } - if n != 1 { - t.Fatal("Somehow wrote the wrong number of bytes! (n != 1)") - } - } - fi.Close() - - rbuf, err := os.ReadFile(fpath) - if err != nil { - t.Fatal(err) - } + // The in-memory test repo returns "" for Path(), so point RepoPath + // at a real directory to exercise the syscall path. + repoDir := t.TempDir() + mnt.Root.RepoPath = repoDir - if !bytes.Equal(rbuf, data) { - t.Fatal("File on disk did not match bytes written") - } + fusetest.AssertStatfsNonZero(t, mnt.Dir) } diff --git a/fuse/ipns/ipns_unix.go b/fuse/ipns/ipns_unix.go index de662997c88..f17c4750868 100644 --- a/fuse/ipns/ipns_unix.go +++ b/fuse/ipns/ipns_unix.go @@ -1,97 +1,63 @@ -//go:build !nofuse && !openbsd && !netbsd && !plan9 +//go:build (linux || darwin || freebsd) && !nofuse -// package fuse/ipns implements a fuse filesystem that interfaces -// with ipns, the naming system for ipfs. +// Package ipns implements a FUSE filesystem that interfaces with IPNS, +// the naming system for IPFS. Only names for which the node holds +// private keys are writable; all other names resolve to read-only +// symlinks pointing at the /ipfs mount. 
package ipns import ( "context" - "errors" - "fmt" - "io" - "os" "strings" "syscall" + "github.com/hanwen/go-fuse/v2/fs" + "github.com/hanwen/go-fuse/v2/fuse" dag "github.com/ipfs/boxo/ipld/merkledag" ft "github.com/ipfs/boxo/ipld/unixfs" + mfs "github.com/ipfs/boxo/mfs" "github.com/ipfs/boxo/namesys" "github.com/ipfs/boxo/path" - - fuse "bazil.org/fuse" - fs "bazil.org/fuse/fs" - mfs "github.com/ipfs/boxo/mfs" cid "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" + "github.com/ipfs/kubo/config" iface "github.com/ipfs/kubo/core/coreiface" options "github.com/ipfs/kubo/core/coreiface/options" + fusemnt "github.com/ipfs/kubo/fuse/mount" + "github.com/ipfs/kubo/fuse/writable" + "github.com/ipfs/kubo/internal/fusemount" ) -func init() { - if os.Getenv("IPFS_FUSE_DEBUG") != "" { - fuse.Debug = func(msg any) { - fmt.Println(msg) - } - } -} - var log = logging.Logger("fuse/ipns") -// FileSystem is the readwrite IPNS Fuse Filesystem. -type FileSystem struct { - Ipfs iface.CoreAPI - RootNode *Root -} - -// NewFileSystem constructs new fs using given core.IpfsNode instance. -func NewFileSystem(ctx context.Context, ipfs iface.CoreAPI, ipfspath, ipnspath string) (*FileSystem, error) { - key, err := ipfs.Key().Self(ctx) - if err != nil { - return nil, err - } - root, err := CreateRoot(ctx, ipfs, map[string]iface.Key{"local": key}, ipfspath, ipnspath) - if err != nil { - return nil, err - } - - return &FileSystem{Ipfs: ipfs, RootNode: root}, nil -} - -// Root constructs the Root of the filesystem, a Root object. -func (f *FileSystem) Root() (fs.Node, error) { - log.Debug("filesystem, get root") - return f.RootNode, nil -} - -func (f *FileSystem) Destroy() { - err := f.RootNode.Close() - if err != nil { - log.Errorf("Error Shutting Down Filesystem: %s\n", err) - } -} - -// Root is the root object of the filesystem tree. +// Root is the root object of the /ipns filesystem tree. 
type Root struct { + fs.Inode Ipfs iface.CoreAPI Keys map[string]iface.Key // Used for symlinking into ipfs IpfsRoot string IpnsRoot string - LocalDirs map[string]fs.Node + LocalDirs map[string]*writable.Dir Roots map[string]*mfs.Root LocalLinks map[string]*Link + RepoPath string } func ipnsPubFunc(ipfs iface.CoreAPI, key iface.Key) mfs.PubFunc { return func(ctx context.Context, c cid.Cid) error { - _, err := ipfs.Name().Publish(ctx, path.FromCid(c), options.Name.Key(key.Name())) + // Bypass the "cannot publish while IPNS is mounted" guard. + // Without this the mount's own publishes are blocked, + // causing silent data loss on daemon restart (issue #2168). + ctx = fusemount.ContextWithPublish(ctx) + _, err := ipfs.Name().Publish(ctx, path.FromCid(c), options.Name.Key(key.Name()), options.Name.AllowOffline(true)) return err } } -func loadRoot(ctx context.Context, ipfs iface.CoreAPI, key iface.Key) (*mfs.Root, fs.Node, error) { +func loadRoot(ctx context.Context, ipfs iface.CoreAPI, key iface.Key, cfg *writable.Config, mfsOpts ...mfs.Option) (*mfs.Root, *writable.Dir, error) { node, err := ipfs.ResolveNode(ctx, key.Path()) switch err { case nil: @@ -107,36 +73,37 @@ func loadRoot(ctx context.Context, ipfs iface.CoreAPI, key iface.Key) (*mfs.Root return nil, nil, dag.ErrNotProtobuf } - // We have no access to provider.System from the CoreAPI. The Routing - // part offers Provide through the router so it may be slow/risky - // to give that here to MFS. Therefore we leave as nil. - root, err := mfs.NewRoot(ctx, ipfs.Dag(), pbnode, ipnsPubFunc(ipfs, key), nil) + root, err := mfs.NewRoot(ctx, ipfs.Dag(), pbnode, ipnsPubFunc(ipfs, key), nil, mfsOpts...) 
if err != nil { return nil, nil, err } - return root, &Directory{dir: root.GetDirectory()}, nil + return root, writable.NewDir(root.GetDirectory(), cfg), nil } -func CreateRoot(ctx context.Context, ipfs iface.CoreAPI, keys map[string]iface.Key, ipfspath, ipnspath string) (*Root, error) { - ldirs := make(map[string]fs.Node) +// CreateRoot creates the IPNS FUSE root with one writable directory per key. +func CreateRoot(ctx context.Context, ipfs iface.CoreAPI, keys map[string]iface.Key, ipfspath, ipnspath, repoPath string, mountsCfg config.Mounts, imp config.Import, mfsOpts ...mfs.Option) (*Root, error) { + cfg := &writable.Config{ + StoreMtime: mountsCfg.StoreMtime.WithDefault(config.DefaultStoreMtime), + StoreMode: mountsCfg.StoreMode.WithDefault(config.DefaultStoreMode), + DAG: ipfs.Dag(), + RepoPath: repoPath, + Blksize: fusemnt.BlksizeFromChunker(imp.UnixFSChunker.WithDefault(config.DefaultUnixFSChunker)), + } + + ldirs := make(map[string]*writable.Dir) roots := make(map[string]*mfs.Root) links := make(map[string]*Link) for alias, k := range keys { - root, fsn, err := loadRoot(ctx, ipfs, k) + root, dir, err := loadRoot(ctx, ipfs, k, cfg, mfsOpts...) if err != nil { return nil, err } name := k.ID().String() - roots[name] = root - ldirs[name] = fsn - - // set up alias symlink - links[alias] = &Link{ - Target: name, - } + ldirs[name] = dir + links[alias] = &Link{Target: name} } return &Root{ @@ -147,422 +114,84 @@ func CreateRoot(ctx context.Context, ipfs iface.CoreAPI, keys map[string]iface.K LocalDirs: ldirs, LocalLinks: links, Roots: roots, + RepoPath: repoPath, }, nil } -// Attr returns file attributes. -func (r *Root) Attr(ctx context.Context, a *fuse.Attr) error { - log.Debug("Root Attr") - a.Mode = os.ModeDir | 0o111 // -rw+x - return nil +// Getattr returns the root directory attributes. 
+func (r *Root) Getattr(_ context.Context, _ fs.FileHandle, out *fuse.AttrOut) syscall.Errno { + out.Attr.Mode = uint32(fusemnt.NamespaceRootMode.Perm()) + return 0 +} + +// Statfs reports disk-space statistics for the underlying filesystem. +// macOS Finder checks free space before copying; without this it +// reports "not enough free space" because go-fuse returns zeroed stats. +func (r *Root) Statfs(_ context.Context, out *fuse.StatfsOut) syscall.Errno { + if r.RepoPath == "" { + return 0 + } + var s syscall.Statfs_t + if err := syscall.Statfs(r.RepoPath, &s); err != nil { + return fs.ToErrno(err) + } + out.FromStatfsT(&s) + return 0 } -// Lookup performs a lookup under this node. -func (r *Root) Lookup(ctx context.Context, name string) (fs.Node, error) { +func (r *Root) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (*fs.Inode, syscall.Errno) { switch name { case "mach_kernel", ".hidden", "._.": - // Just quiet some log noise on OS X. - return nil, syscall.Errno(syscall.ENOENT) + return nil, syscall.ENOENT } if lnk, ok := r.LocalLinks[name]; ok { - return lnk, nil + return r.NewInode(ctx, lnk, fs.StableAttr{Mode: syscall.S_IFLNK}), 0 } - nd, ok := r.LocalDirs[name] - if ok { - switch nd := nd.(type) { - case *Directory: - return nd, nil - case *FileNode: - return nd, nil - default: - return nil, syscall.Errno(syscall.EIO) - } + if dir, ok := r.LocalDirs[name]; ok { + return r.NewInode(ctx, dir, fs.StableAttr{Mode: syscall.S_IFDIR}), 0 } - // other links go through ipns resolution and are symlinked into the ipfs mountpoint - ipnsName := "/ipns/" + name - resolved, err := r.Ipfs.Name().Resolve(ctx, ipnsName) + // Other links go through IPNS resolution and are symlinked into the /ipfs mount. 
+ resolved, err := r.Ipfs.Name().Resolve(ctx, "/ipns/"+name) if err != nil { log.Warnf("ipns: namesys resolve error: %s", err) - return nil, syscall.Errno(syscall.ENOENT) + return nil, syscall.ENOENT } if resolved.Namespace() != path.IPFSNamespace { - return nil, errors.New("invalid path from ipns record") - } - - return &Link{r.IpfsRoot + "/" + strings.TrimPrefix(resolved.String(), "/ipfs/")}, nil -} - -func (r *Root) Close() error { - for _, mr := range r.Roots { - err := mr.Close() - if err != nil { - return err - } + return nil, syscall.ENOENT } - return nil -} -// Forget is called when the filesystem is unmounted. probably. -// see comments here: http://godoc.org/bazil.org/fuse/fs#FSDestroyer -func (r *Root) Forget() { - err := r.Close() - if err != nil { - log.Error(err) - } + lnk := &Link{Target: r.IpfsRoot + "/" + strings.TrimPrefix(resolved.String(), "/ipfs/")} + return r.NewInode(ctx, lnk, fs.StableAttr{Mode: syscall.S_IFLNK}), 0 } -// ReadDirAll reads a particular directory. Will show locally available keys -// as well as a symlink to the peerID key. -func (r *Root) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { - log.Debug("Root ReadDirAll") - - listing := make([]fuse.Dirent, 0, len(r.Keys)*2) +func (r *Root) Readdir(_ context.Context) (fs.DirStream, syscall.Errno) { + entries := make([]fuse.DirEntry, 0, len(r.Keys)*2) for alias, k := range r.Keys { - ent := fuse.Dirent{ - Name: k.ID().String(), - Type: fuse.DT_Dir, - } - link := fuse.Dirent{ - Name: alias, - Type: fuse.DT_Link, - } - listing = append(listing, ent, link) - } - return listing, nil -} - -// Directory is wrapper over an mfs directory to satisfy the fuse fs interface. -type Directory struct { - dir *mfs.Directory -} - -type FileNode struct { - fi *mfs.File -} - -// File is wrapper over an mfs file to satisfy the fuse fs interface. -type File struct { - fi mfs.FileDescriptor -} - -// Attr returns the attributes of a given node. 
-func (d *Directory) Attr(ctx context.Context, a *fuse.Attr) error { - log.Debug("Directory Attr") - a.Mode = os.ModeDir | 0o555 - a.Uid = uint32(os.Getuid()) - a.Gid = uint32(os.Getgid()) - return nil -} - -// Attr returns the attributes of a given node. -func (fi *FileNode) Attr(ctx context.Context, a *fuse.Attr) error { - log.Debug("File Attr") - size, err := fi.fi.Size() - if err != nil { - // In this case, the dag node in question may not be unixfs - return fmt.Errorf("fuse/ipns: failed to get file.Size(): %s", err) - } - a.Mode = os.FileMode(0o666) - a.Size = uint64(size) - a.Uid = uint32(os.Getuid()) - a.Gid = uint32(os.Getgid()) - return nil -} - -// Lookup performs a lookup under this node. -func (d *Directory) Lookup(ctx context.Context, name string) (fs.Node, error) { - child, err := d.dir.Child(name) - if err != nil { - // todo: make this error more versatile. - return nil, syscall.Errno(syscall.ENOENT) - } - - switch child := child.(type) { - case *mfs.Directory: - return &Directory{dir: child}, nil - case *mfs.File: - return &FileNode{fi: child}, nil - default: - // NB: if this happens, we do not want to continue, unpredictable behaviour - // may occur. - panic("invalid type found under directory. programmer error.") - } -} - -// ReadDirAll reads the link structure as directory entries. 
-func (d *Directory) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { - listing, err := d.dir.List(ctx) - if err != nil { - return nil, err - } - entries := make([]fuse.Dirent, len(listing)) - for i, entry := range listing { - dirent := fuse.Dirent{Name: entry.Name} - - switch mfs.NodeType(entry.Type) { - case mfs.TDir: - dirent.Type = fuse.DT_Dir - case mfs.TFile: - dirent.Type = fuse.DT_File - } - - entries[i] = dirent - } - - if len(entries) > 0 { - return entries, nil - } - return nil, syscall.Errno(syscall.ENOENT) -} - -func (fi *File) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { - _, err := fi.fi.Seek(req.Offset, io.SeekStart) - if err != nil { - return err - } - - fisize, err := fi.fi.Size() - if err != nil { - return err - } - - select { - case <-ctx.Done(): - return ctx.Err() - default: + entries = append(entries, + fuse.DirEntry{Name: k.ID().String(), Mode: syscall.S_IFDIR}, + fuse.DirEntry{Name: alias, Mode: syscall.S_IFLNK}, + ) } - - readsize := min(req.Size, int(fisize-req.Offset)) - n, err := fi.fi.CtxReadFull(ctx, resp.Data[:readsize]) - resp.Data = resp.Data[:n] - return err + return fs.NewListDirStream(entries), 0 } -func (fi *File) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error { - // TODO: at some point, ensure that WriteAt here respects the context - wrote, err := fi.fi.WriteAt(req.Data, req.Offset) - if err != nil { - return err - } - resp.Size = wrote - return nil -} - -func (fi *File) Flush(ctx context.Context, req *fuse.FlushRequest) error { - errs := make(chan error, 1) - go func() { - errs <- fi.fi.Flush() - }() - select { - case err := <-errs: - return err - case <-ctx.Done(): - return ctx.Err() - } -} - -func (fi *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { - if req.Valid.Size() { - cursize, err := fi.fi.Size() - if err != nil { - return err - } - if cursize != int64(req.Size) { - err := 
fi.fi.Truncate(int64(req.Size)) - if err != nil { - return err - } - } - } - return nil -} - -// Fsync flushes the content in the file to disk. -func (fi *FileNode) Fsync(ctx context.Context, req *fuse.FsyncRequest) error { - // This needs to perform a *full* flush because, in MFS, a write isn't - // persisted until the root is updated. - errs := make(chan error, 1) - go func() { - errs <- fi.fi.Flush() - }() - select { - case err := <-errs: - return err - case <-ctx.Done(): - return ctx.Err() - } -} - -func (fi *File) Forget() { - // TODO(steb): this seems like a place where we should be *uncaching*, not flushing. - err := fi.fi.Flush() - if err != nil { - log.Debug("forget file error: ", err) - } -} - -func (d *Directory) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) { - child, err := d.dir.Mkdir(req.Name) - if err != nil { - return nil, err - } - - return &Directory{dir: child}, nil -} - -func (fi *FileNode) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) { - fd, err := fi.fi.Open(mfs.Flags{ - Read: req.Flags.IsReadOnly() || req.Flags.IsReadWrite(), - Write: req.Flags.IsWriteOnly() || req.Flags.IsReadWrite(), - Sync: true, - }) - if err != nil { - return nil, err - } - - if req.Flags&fuse.OpenTruncate != 0 { - if req.Flags.IsReadOnly() { - log.Error("tried to open a readonly file with truncate") - return nil, syscall.Errno(syscall.ENOTSUP) - } - log.Info("Need to truncate file!") - err := fd.Truncate(0) - if err != nil { - return nil, err - } - } else if req.Flags&fuse.OpenAppend != 0 { - log.Info("Need to append to file!") - if req.Flags.IsReadOnly() { - log.Error("tried to open a readonly file with append") - return nil, syscall.Errno(syscall.ENOTSUP) - } - - _, err := fd.Seek(0, io.SeekEnd) - if err != nil { - log.Error("seek reset failed: ", err) - return nil, err - } - } - - return &File{fi: fd}, nil -} - -func (fi *File) Release(ctx context.Context, req *fuse.ReleaseRequest) error { - 
return fi.fi.Close() -} - -func (d *Directory) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) { - // New 'empty' file - nd := dag.NodeWithData(ft.FilePBData(nil, 0)) - err := d.dir.AddChild(req.Name, nd) - if err != nil { - return nil, nil, err - } - - child, err := d.dir.Child(req.Name) - if err != nil { - return nil, nil, err - } - - fi, ok := child.(*mfs.File) - if !ok { - return nil, nil, errors.New("child creation failed") - } - - nodechild := &FileNode{fi: fi} - - fd, err := fi.Open(mfs.Flags{ - Read: req.Flags.IsReadOnly() || req.Flags.IsReadWrite(), - Write: req.Flags.IsWriteOnly() || req.Flags.IsReadWrite(), - Sync: true, - }) - if err != nil { - return nil, nil, err - } - - return nodechild, &File{fi: fd}, nil -} - -func (d *Directory) Remove(ctx context.Context, req *fuse.RemoveRequest) error { - err := d.dir.Unlink(req.Name) - if err != nil { - return syscall.Errno(syscall.ENOENT) - } - return nil -} - -// Rename implements NodeRenamer. -func (d *Directory) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fs.Node) error { - cur, err := d.dir.Child(req.OldName) - if err != nil { - return err - } - - err = d.dir.Unlink(req.OldName) - if err != nil { - return err - } - - switch newDir := newDir.(type) { - case *Directory: - nd, err := cur.GetNode() - if err != nil { - return err - } - - err = newDir.dir.AddChild(req.NewName, nd) - if err != nil { +func (r *Root) Close() error { + for _, mr := range r.Roots { + if err := mr.Close(); err != nil { return err } - case *FileNode: - log.Error("Cannot move node into a file!") - return syscall.Errno(syscall.EPERM) - default: - log.Error("Unknown node type for rename target dir!") - return errors.New("unknown fs node type") } return nil } -// to check that out Node implements all the interfaces we want. 
-type ipnsRoot interface { - fs.Node - fs.HandleReadDirAller - fs.NodeStringLookuper -} - -var _ ipnsRoot = (*Root)(nil) - -type ipnsDirectory interface { - fs.HandleReadDirAller - fs.Node - fs.NodeCreater - fs.NodeMkdirer - fs.NodeRemover - fs.NodeRenamer - fs.NodeStringLookuper -} - -var _ ipnsDirectory = (*Directory)(nil) - -type ipnsFile interface { - fs.HandleFlusher - fs.HandleReader - fs.HandleWriter - fs.HandleReleaser -} - -type ipnsFileNode interface { - fs.Node - fs.NodeFsyncer - fs.NodeOpener -} - +// Interface compliance checks for Root. var ( - _ ipnsFileNode = (*FileNode)(nil) - _ ipnsFile = (*File)(nil) + _ fs.NodeGetattrer = (*Root)(nil) + _ fs.NodeLookuper = (*Root)(nil) + _ fs.NodeReaddirer = (*Root)(nil) + _ fs.NodeStatfser = (*Root)(nil) ) diff --git a/fuse/ipns/link_unix.go b/fuse/ipns/link_unix.go index f95894b1235..c3c8e3d53dd 100644 --- a/fuse/ipns/link_unix.go +++ b/fuse/ipns/link_unix.go @@ -1,28 +1,33 @@ -//go:build !nofuse && !openbsd && !netbsd && !plan9 +// Symlink node for the /ipns FUSE mount. go-fuse only builds on linux, darwin, and freebsd. 
+//go:build (linux || darwin || freebsd) && !nofuse package ipns import ( "context" - "os" + "syscall" - "bazil.org/fuse" - "bazil.org/fuse/fs" + "github.com/hanwen/go-fuse/v2/fs" + "github.com/hanwen/go-fuse/v2/fuse" ) type Link struct { + fs.Inode Target string } -func (l *Link) Attr(ctx context.Context, a *fuse.Attr) error { +func (l *Link) Getattr(_ context.Context, _ fs.FileHandle, out *fuse.AttrOut) syscall.Errno { log.Debug("Link attr.") - a.Mode = os.ModeSymlink | 0o555 - return nil + out.Attr.Mode = 0o555 + return 0 } -func (l *Link) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) { +func (l *Link) Readlink(_ context.Context) ([]byte, syscall.Errno) { log.Debugf("ReadLink: %s", l.Target) - return l.Target, nil + return []byte(l.Target), 0 } -var _ fs.NodeReadlinker = (*Link)(nil) +var ( + _ fs.NodeGetattrer = (*Link)(nil) + _ fs.NodeReadlinker = (*Link)(nil) +) diff --git a/fuse/ipns/mount_unix.go b/fuse/ipns/mount_unix.go index da3a6ac0b31..f9bbad62400 100644 --- a/fuse/ipns/mount_unix.go +++ b/fuse/ipns/mount_unix.go @@ -1,15 +1,30 @@ -//go:build (linux || darwin || freebsd || netbsd || openbsd) && !nofuse +// Mount/unmount helpers for the /ipns FUSE mount. go-fuse only builds on linux, darwin, and freebsd. +//go:build (linux || darwin || freebsd) && !nofuse package ipns import ( + "os" + "time" + + "github.com/hanwen/go-fuse/v2/fs" + "github.com/hanwen/go-fuse/v2/fuse" + "github.com/ipfs/kubo/config" core "github.com/ipfs/kubo/core" coreapi "github.com/ipfs/kubo/core/coreapi" - mount "github.com/ipfs/kubo/fuse/mount" + iface "github.com/ipfs/kubo/core/coreiface" + fusemnt "github.com/ipfs/kubo/fuse/mount" ) +// How long the kernel caches Lookup and Getattr results. 1 second +// matches the go-fuse default and what gocryptfs/rclone use. +// TODO: for resolved IPNS names, use the record's cache TTL (capped +// at Ipns.MaxCacheTTL) instead of a fixed 1 second. +// var (not const) because fs.Options needs a *time.Duration. 
+var mutableCacheTime = time.Second + // Mount mounts ipns at a given location, and returns a mount.Mount instance. -func Mount(ipfs *core.IpfsNode, ipnsmp, ipfsmp string) (mount.Mount, error) { +func Mount(ipfs *core.IpfsNode, ipnsmp, ipfsmp string) (fusemnt.Mount, error) { coreAPI, err := coreapi.NewCoreAPI(ipfs) if err != nil { return nil, err @@ -20,12 +35,54 @@ func Mount(ipfs *core.IpfsNode, ipnsmp, ipfsmp string) (mount.Mount, error) { return nil, err } - allowOther := cfg.Mounts.FuseAllowOther + mfsOpts, err := cfg.Import.MFSRootOptions() + if err != nil { + return nil, err + } + + key, err := coreAPI.Key().Self(ipfs.Context()) + if err != nil { + return nil, err + } - fsys, err := NewFileSystem(ipfs.Context(), coreAPI, ipfsmp, ipnsmp) + root, err := CreateRoot(ipfs.Context(), coreAPI, map[string]iface.Key{"local": key}, ipfsmp, ipnsmp, ipfs.Repo.Path(), cfg.Mounts, cfg.Import, mfsOpts...) if err != nil { return nil, err } - return mount.NewMount(fsys, ipnsmp, allowOther) + opts := &fs.Options{ + NullPermissions: true, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + EntryTimeout: &mutableCacheTime, + AttrTimeout: &mutableCacheTime, + MountOptions: fuse.MountOptions{ + AllowOther: cfg.Mounts.FuseAllowOther.WithDefault(config.DefaultFuseAllowOther), + FsName: "ipns", + MaxReadAhead: fusemnt.MaxReadAhead, + Debug: os.Getenv("IPFS_FUSE_DEBUG") != "", + ExtraCapabilities: fusemnt.WritableMountCapabilities, + }, + } + + m, err := fusemnt.NewMount(root, ipnsmp, opts) + if err != nil { + _ = root.Close() + return nil, err + } + + return &ipnsMount{Mount: m, root: root}, nil +} + +// ipnsMount wraps mount.Mount to call Root.Close() on unmount, +// which flushes and publishes all MFS roots. 
+type ipnsMount struct { + fusemnt.Mount + root *Root +} + +func (m *ipnsMount) Unmount() error { + err := m.Mount.Unmount() + _ = m.root.Close() + return err } diff --git a/fuse/mfs/mfs_test.go b/fuse/mfs/mfs_test.go index 0e61f1903ea..d76007ed326 100644 --- a/fuse/mfs/mfs_test.go +++ b/fuse/mfs/mfs_test.go @@ -1,4 +1,9 @@ -//go:build !nofuse && !openbsd && !netbsd && !plan9 +//go:build (linux || darwin || freebsd) && !nofuse + +// Unit tests for the /mfs FUSE mount. +// Generic writable operations are exercised by the shared suite in +// fusetest.RunWritableSuite. This file contains the mount factory +// and MFS-specific tests only. package mfs @@ -6,336 +11,156 @@ import ( "bytes" "context" "crypto/rand" - "errors" - iofs "io/fs" "os" - "slices" - "strconv" + "syscall" "testing" - "time" - "bazil.org/fuse" - "bazil.org/fuse/fs" - "bazil.org/fuse/fs/fstestutil" + "github.com/hanwen/go-fuse/v2/fs" + "github.com/hanwen/go-fuse/v2/fuse" + "github.com/stretchr/testify/require" + + "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/core" "github.com/ipfs/kubo/core/node" - "github.com/libp2p/go-libp2p-testing/ci" + "github.com/ipfs/kubo/fuse/fusetest" + fusemnt "github.com/ipfs/kubo/fuse/mount" + "github.com/ipfs/kubo/fuse/writable" ) -// Create an Ipfs.Node, a filesystem and a mount point. 
-func setUp(t *testing.T, ipfs *core.IpfsNode) (fs.FS, *fstestutil.Mount) { - if ci.NoFuse() { - t.Skip("Skipping FUSE tests") - } - - if ipfs == nil { - var err error - ipfs, err = core.NewNode(context.Background(), &node.BuildCfg{}) - if err != nil { - t.Fatal(err) - } - } - - fs := NewFileSystem(ipfs) - mnt, err := fstestutil.MountedT(t, fs, nil) - if err == fuse.ErrOSXFUSENotFound { - t.Skip(err) - } - if err != nil { - t.Fatal(err) - } - - return fs, mnt +func testMount(t *testing.T, root fs.InodeEmbedder) string { + t.Helper() + return fusetest.TestMount(t, root, &fs.Options{ + EntryTimeout: &mutableCacheTime, + AttrTimeout: &mutableCacheTime, + MountOptions: fuse.MountOptions{ + MaxReadAhead: fusemnt.MaxReadAhead, + ExtraCapabilities: fusemnt.WritableMountCapabilities, + }, + }) } -// Test reading and writing a file. -func TestReadWrite(t *testing.T) { - _, mnt := setUp(t, nil) - defer mnt.Close() +func mfsMount(t *testing.T, cfg writable.Config) string { + t.Helper() + ipfs, err := core.NewNode(context.Background(), &node.BuildCfg{}) + require.NoError(t, err) - path := mnt.Dir + "/testrw" - content := make([]byte, 8196) - _, err := rand.Read(content) - if err != nil { - t.Fatal(err) + mountsCfg := config.Mounts{} + if cfg.StoreMtime { + mountsCfg.StoreMtime = config.True } - - t.Run("write", func(t *testing.T) { - f, err := os.Create(path) - if err != nil { - t.Fatal(err) - } - defer f.Close() - - _, err = f.Write(content) - if err != nil { - t.Fatal(err) - } - }) - t.Run("read", func(t *testing.T) { - f, err := os.Open(path) - if err != nil { - t.Fatal(err) - } - defer f.Close() - - buf := make([]byte, 8196) - l, err := f.Read(buf) - if err != nil { - t.Fatal(err) - } - if bytes.Equal(content, buf[:l]) != true { - t.Fatal("read and write not equal") - } - }) + if cfg.StoreMode { + mountsCfg.StoreMode = config.True + } + root := NewFileSystem(ipfs, mountsCfg, config.Import{}) + return testMount(t, root) } -// Test creating a directory. 
-func TestMkdir(t *testing.T) { - _, mnt := setUp(t, nil) - defer mnt.Close() - - path := mnt.Dir + "/foo/bar/baz/qux/quux" - - t.Run("write", func(t *testing.T) { - err := os.MkdirAll(path, iofs.ModeDir) - if err != nil { - t.Fatal(err) - } - }) - t.Run("read", func(t *testing.T) { - stat, err := os.Stat(path) - if err != nil { - t.Fatal(err) - } - if !stat.IsDir() { - t.Fatal("not dir") - } - }) +func TestWritableSuite(t *testing.T) { + fusetest.RunWritableSuite(t, mfsMount) } -// Test file persistence across mounts. +// TestPersistence verifies that file data survives unmount and remount +// on the same IpfsNode. func TestPersistence(t *testing.T) { ipfs, err := core.NewNode(context.Background(), &node.BuildCfg{}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) content := make([]byte, 8196) _, err = rand.Read(content) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) t.Run("write", func(t *testing.T) { - _, mnt := setUp(t, ipfs) - defer mnt.Close() - path := mnt.Dir + "/testpersistence" - - f, err := os.Create(path) - if err != nil { - t.Fatal(err) - } - defer f.Close() + root := NewFileSystem(ipfs, config.Mounts{}, config.Import{}) + mntDir := testMount(t, root) + f, err := os.Create(mntDir + "/testpersistence") + require.NoError(t, err) _, err = f.Write(content) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, f.Close()) }) t.Run("read", func(t *testing.T) { - _, mnt := setUp(t, ipfs) - defer mnt.Close() - path := mnt.Dir + "/testpersistence" - - f, err := os.Open(path) - if err != nil { - t.Fatal(err) - } - defer f.Close() + root := NewFileSystem(ipfs, config.Mounts{}, config.Import{}) + mntDir := testMount(t, root) - buf := make([]byte, 8196) - l, err := f.Read(buf) - if err != nil { - t.Fatal(err) - } - if bytes.Equal(content, buf[:l]) != true { - t.Fatal("read and write not equal") - } + got, err := os.ReadFile(mntDir + "/testpersistence") + require.NoError(t, err) + require.True(t, 
bytes.Equal(content, got)) }) } -// Test getting the file attributes. -func TestAttr(t *testing.T) { - _, mnt := setUp(t, nil) - defer mnt.Close() - - path := mnt.Dir + "/testattr" - content := make([]byte, 8196) - _, err := rand.Read(content) - if err != nil { - t.Fatal(err) - } - - t.Run("write", func(t *testing.T) { - f, err := os.Create(path) - if err != nil { - t.Fatal(err) - } - defer f.Close() - - _, err = f.Write(content) - if err != nil { - t.Fatal(err) - } +// TestStatBlocks verifies that stat(2) on entries in /mfs populates +// st_blocks (used by du and ls -s) consistent with the file size, and +// that st_blksize advertises the chunker size MFS will use for writes +// so tools can align their I/O buffers. +func TestStatBlocks(t *testing.T) { + const chunkerStr = "size-65536" + const wantBlksize uint32 = 65536 + + ipfs, err := core.NewNode(t.Context(), &node.BuildCfg{}) + require.NoError(t, err) + + kuboCfg := config.Import{UnixFSChunker: *config.NewOptionalString(chunkerStr)} + root := NewFileSystem(ipfs, config.Mounts{}, kuboCfg) + mntDir := testMount(t, root) + + t.Run("multi-block file", func(t *testing.T) { + // >1 MiB ensures the UnixFS DAG has multiple leaves under the + // configured 64 KiB chunker. 
+ content := make([]byte, 1024*1024+1) + _, err := rand.Read(content) + require.NoError(t, err) + fpath := mntDir + "/big" + require.NoError(t, os.WriteFile(fpath, content, 0o644)) + fusetest.AssertStatBlocks(t, fpath, wantBlksize) }) - t.Run("read", func(t *testing.T) { - fi, err := os.Stat(path) - if err != nil { - t.Fatal(err) - } - - if fi.IsDir() { - t.Fatal("file is a directory") - } - if fi.ModTime().After(time.Now()) { - t.Fatal("future modtime") - } - if time.Since(fi.ModTime()) > time.Second { - t.Fatal("past modtime") - } - - if fi.Name() != "testattr" { - t.Fatal("invalid filename") - } - - if fi.Size() != 8196 { - t.Fatal("invalid size") - } + t.Run("small single-chunk file", func(t *testing.T) { + fpath := mntDir + "/small" + require.NoError(t, os.WriteFile(fpath, []byte("hello"), 0o644)) + fusetest.AssertStatBlocks(t, fpath, wantBlksize) }) -} - -// Test concurrent access to the filesystem. -func TestConcurrentRW(t *testing.T) { - _, mnt := setUp(t, nil) - defer mnt.Close() - files := 5 - fileWorkers := 5 - - path := mnt.Dir + "/testconcurrent" - content := make([][]byte, files) - - for i := range content { - content[i] = make([]byte, 8196) - _, err := rand.Read(content[i]) - if err != nil { - t.Fatal(err) - } - } - - t.Run("write", func(t *testing.T) { - errs := make(chan (error), 1) - for i := range files { - go func() { - var err error - defer func() { errs <- err }() - - f, err := os.Create(path + strconv.Itoa(i)) - if err != nil { - return - } - defer f.Close() - - _, err = f.Write(content[i]) - if err != nil { - return - } - }() - } - for range files { - err := <-errs - if err != nil { - t.Fatal(err) - } - } + t.Run("directory", func(t *testing.T) { + dpath := mntDir + "/d" + require.NoError(t, os.Mkdir(dpath, 0o755)) + info, err := os.Stat(dpath) + require.NoError(t, err) + st, ok := info.Sys().(*syscall.Stat_t) + require.True(t, ok) + require.EqualValues(t, 1, st.Blocks, "directory should report 1 nominal block") + require.EqualValues(t, 
wantBlksize, st.Blksize) }) - t.Run("read", func(t *testing.T) { - errs := make(chan (error), 1) - for i := 0; i < files*fileWorkers; i++ { - go func() { - var err error - defer func() { errs <- err }() - f, err := os.Open(path + strconv.Itoa(i/fileWorkers)) - if err != nil { - return - } - defer f.Close() - - buf := make([]byte, 8196) - l, err := f.Read(buf) - if err != nil { - return - } - if bytes.Equal(content[i/fileWorkers], buf[:l]) != true { - err = errors.New("read and write not equal") - return - } - }() - } - for range files { - err := <-errs - if err != nil { - t.Fatal(err) - } - } + t.Run("symlink", func(t *testing.T) { + const target = "../some/target" + lpath := mntDir + "/link" + require.NoError(t, os.Symlink(target, lpath)) + info, err := os.Lstat(lpath) + require.NoError(t, err) + st, ok := info.Sys().(*syscall.Stat_t) + require.True(t, ok) + require.EqualValues(t, len(target), st.Size) + require.EqualValues(t, 1, st.Blocks) + require.EqualValues(t, wantBlksize, st.Blksize) }) } -// Test ipfs_cid extended attribute -func TestMFSRootXattr(t *testing.T) { - ipfs, err := core.NewNode(context.Background(), &node.BuildCfg{}) - if err != nil { - t.Fatal(err) - } - - fs, mnt := setUp(t, ipfs) - defer mnt.Close() - - node, err := fs.Root() - if err != nil { - t.Fatal(err) - } - - root := node.(*Dir) - - listReq := fuse.ListxattrRequest{} - listRes := fuse.ListxattrResponse{} - err = root.Listxattr(context.Background(), &listReq, &listRes) - if err != nil { - t.Fatal(err) - } - if slices.Compare(listRes.Xattr, []byte("ipfs_cid\x00")) != 0 { - t.Fatal("list xattr returns invalid value") - } - - getReq := fuse.GetxattrRequest{ - Name: "ipfs_cid", - } - getRes := fuse.GetxattrResponse{} - err = root.Getxattr(context.Background(), &getReq, &getRes) - if err != nil { - t.Fatal(err) - } - - ipldNode, err := ipfs.FilesRoot.GetDirectory().GetNode() - if err != nil { - t.Fatal(err) - } +// TestStatfs verifies that statfs on the /mfs mount reports the disk +// space 
of the repo's backing filesystem. macOS Finder refuses to copy +// files onto a volume that reports zero free space. +func TestStatfs(t *testing.T) { + ipfs, err := core.NewNode(t.Context(), &node.BuildCfg{}) + require.NoError(t, err) + + // The default in-memory repo returns "" for Path(), so point + // RepoPath at a real directory to exercise the syscall path. + repoDir := t.TempDir() + root := writable.NewDir(ipfs.FilesRoot.GetDirectory(), &writable.Config{ + DAG: ipfs.DAG, + RepoPath: repoDir, + }) + mntDir := testMount(t, root) - if slices.Compare(getRes.Xattr, []byte(ipldNode.Cid().String())) != 0 { - t.Fatal("xattr cid not equal to mfs root cid") - } + fusetest.AssertStatfsNonZero(t, mntDir) } diff --git a/fuse/mfs/mfs_unix.go b/fuse/mfs/mfs_unix.go index 99ca5fe529e..6756adfa4a1 100644 --- a/fuse/mfs/mfs_unix.go +++ b/fuse/mfs/mfs_unix.go @@ -1,412 +1,23 @@ -//go:build (linux || darwin || freebsd || netbsd || openbsd) && !nofuse +// FUSE filesystem for the /mfs mount. +// +//go:build (linux || darwin || freebsd) && !nofuse package mfs import ( - "context" - "io" - "os" - "sync" - "syscall" - "time" - - "bazil.org/fuse" - "bazil.org/fuse/fs" - - dag "github.com/ipfs/boxo/ipld/merkledag" - ft "github.com/ipfs/boxo/ipld/unixfs" - "github.com/ipfs/boxo/mfs" + "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/core" + fusemnt "github.com/ipfs/kubo/fuse/mount" + "github.com/ipfs/kubo/fuse/writable" ) -const ( - ipfsCIDXattr = "ipfs_cid" - mfsDirMode = os.ModeDir | 0755 - mfsFileMode = 0644 - blockSize = 512 - dirSize = 8 -) - -// FUSE filesystem mounted at /mfs. -type FileSystem struct { - root Dir -} - -// Get filesystem root. -func (fs *FileSystem) Root() (fs.Node, error) { - return &fs.root, nil -} - -// FUSE Adapter for MFS directories. -type Dir struct { - mfsDir *mfs.Directory -} - -// Directory attributes (stat). 
-func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error { - attr.Mode = mfsDirMode - attr.Size = dirSize * blockSize - attr.Blocks = dirSize - return nil -} - -// Access files in a directory. -func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (fs.Node, error) { - mfsNode, err := dir.mfsDir.Child(req.Name) - switch err { - case os.ErrNotExist: - return nil, syscall.Errno(syscall.ENOENT) - case nil: - default: - return nil, err - } - - switch mfsNode.Type() { - case mfs.TDir: - result := Dir{ - mfsDir: mfsNode.(*mfs.Directory), - } - return &result, nil - case mfs.TFile: - result := File{ - mfsFile: mfsNode.(*mfs.File), - } - return &result, nil - } - - return nil, syscall.Errno(syscall.ENOENT) -} - -// List (ls) MFS directory. -func (dir *Dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { - var res []fuse.Dirent - nodes, err := dir.mfsDir.List(ctx) - if err != nil { - return nil, err - } - - for _, node := range nodes { - nodeType := fuse.DT_File - if node.Type == 1 { - nodeType = fuse.DT_Dir - } - res = append(res, fuse.Dirent{ - Type: nodeType, - Name: node.Name, - }) - } - return res, nil -} - -// Mkdir (mkdir) in MFS. -func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) { - mfsDir, err := dir.mfsDir.Mkdir(req.Name) - if err != nil { - return nil, err - } - return &Dir{ - mfsDir: mfsDir, - }, nil -} - -// Remove (rm/rmdir) an MFS file. -func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error { - // Check for empty directory. - if req.Dir { - targetNode, err := dir.mfsDir.Child(req.Name) - if err != nil { - return err - } - target := targetNode.(*mfs.Directory) - - children, err := target.ListNames(ctx) - if err != nil { - return err - } - if len(children) > 0 { - return os.ErrExist - } - } - err := dir.mfsDir.Unlink(req.Name) - if err != nil { - return err - } - return dir.mfsDir.Flush() -} - -// Move (mv) an MFS file. 
-func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fs.Node) error { - file, err := dir.mfsDir.Child(req.OldName) - if err != nil { - return err - } - node, err := file.GetNode() - if err != nil { - return err - } - targetDir := newDir.(*Dir) - - // Remove file if exists - err = targetDir.mfsDir.Unlink(req.NewName) - if err != nil && err != os.ErrNotExist { - return err - } - - err = targetDir.mfsDir.AddChild(req.NewName, node) - if err != nil { - return err - } - - err = dir.mfsDir.Unlink(req.OldName) - if err != nil { - return err - } - - return dir.mfsDir.Flush() -} - -// Create (touch) an MFS file. -func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) { - node := dag.NodeWithData(ft.FilePBData(nil, 0)) - if err := node.SetCidBuilder(dir.mfsDir.GetCidBuilder()); err != nil { - return nil, nil, err - } - - if err := dir.mfsDir.AddChild(req.Name, node); err != nil { - return nil, nil, err - } - - if err := dir.mfsDir.Flush(); err != nil { - return nil, nil, err - } - - mfsNode, err := dir.mfsDir.Child(req.Name) - if err != nil { - return nil, nil, err - } - if err := mfsNode.SetModTime(time.Now()); err != nil { - return nil, nil, err - } - - mfsFile := mfsNode.(*mfs.File) - - file := File{ - mfsFile: mfsFile, - } - - // Read access flags and create a handler. - accessMode := req.Flags & fuse.OpenAccessModeMask - flags := mfs.Flags{ - Read: accessMode == fuse.OpenReadOnly || accessMode == fuse.OpenReadWrite, - Write: accessMode == fuse.OpenWriteOnly || accessMode == fuse.OpenReadWrite, - Sync: req.Flags|fuse.OpenSync > 0, - } - - fd, err := mfsFile.Open(flags) - if err != nil { - return nil, nil, err - } - handler := FileHandler{ - mfsFD: fd, - } - - return &file, &handler, nil -} - -// List dir xattr. 
-func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { - resp.Append(ipfsCIDXattr) - return nil -} - -// Get dir xattr. -func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { - switch req.Name { - case ipfsCIDXattr: - node, err := dir.mfsDir.GetNode() - if err != nil { - return err - } - resp.Xattr = []byte(node.Cid().String()) - return nil - default: - return fuse.ErrNoXattr - } -} - -// FUSE adapter for MFS files. -type File struct { - mfsFile *mfs.File +// NewFileSystem creates a new MFS FUSE root node. +func NewFileSystem(ipfs *core.IpfsNode, mounts config.Mounts, imp config.Import) *writable.Dir { + return writable.NewDir(ipfs.FilesRoot.GetDirectory(), &writable.Config{ + StoreMtime: mounts.StoreMtime.WithDefault(config.DefaultStoreMtime), + StoreMode: mounts.StoreMode.WithDefault(config.DefaultStoreMode), + DAG: ipfs.DAG, + RepoPath: ipfs.Repo.Path(), + Blksize: fusemnt.BlksizeFromChunker(imp.UnixFSChunker.WithDefault(config.DefaultUnixFSChunker)), + }) } - -// File attributes. -func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error { - size, _ := file.mfsFile.Size() - - attr.Size = uint64(size) - if size%blockSize == 0 { - attr.Blocks = uint64(size / blockSize) - } else { - attr.Blocks = uint64(size/blockSize + 1) - } - - mtime, _ := file.mfsFile.ModTime() - attr.Mtime = mtime - - attr.Mode = mfsFileMode - return nil -} - -// Open an MFS file. 
-func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) { - accessMode := req.Flags & fuse.OpenAccessModeMask - flags := mfs.Flags{ - Read: accessMode == fuse.OpenReadOnly || accessMode == fuse.OpenReadWrite, - Write: accessMode == fuse.OpenWriteOnly || accessMode == fuse.OpenReadWrite, - Sync: req.Flags|fuse.OpenSync > 0, - } - fd, err := file.mfsFile.Open(flags) - if err != nil { - return nil, err - } - - if flags.Write { - if err := file.mfsFile.SetModTime(time.Now()); err != nil { - return nil, err - } - } - - return &FileHandler{ - mfsFD: fd, - }, nil -} - -// Sync the file's contents to MFS. -func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error { - return file.mfsFile.Sync() -} - -// List file xattr. -func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { - resp.Append(ipfsCIDXattr) - return nil -} - -// Get file xattr. -func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { - switch req.Name { - case ipfsCIDXattr: - node, err := file.mfsFile.GetNode() - if err != nil { - return err - } - resp.Xattr = []byte(node.Cid().String()) - return nil - default: - return fuse.ErrNoXattr - } -} - -// Wrapper for MFS's file descriptor that conforms to the FUSE fs.Handler -// interface. -type FileHandler struct { - mfsFD mfs.FileDescriptor - mu sync.Mutex -} - -// Read a opened MFS file. -func (fh *FileHandler) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { - fh.mu.Lock() - defer fh.mu.Unlock() - - _, err := fh.mfsFD.Seek(req.Offset, io.SeekStart) - if err != nil { - return err - } - - buf := make([]byte, req.Size) - l, err := fh.mfsFD.Read(buf) - - resp.Data = buf[:l] - - switch err { - case nil, io.EOF, io.ErrUnexpectedEOF: - return nil - default: - return err - } -} - -// Write writes to an opened MFS file. 
-func (fh *FileHandler) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error { - fh.mu.Lock() - defer fh.mu.Unlock() - - l, err := fh.mfsFD.WriteAt(req.Data, req.Offset) - if err != nil { - return err - } - resp.Size = l - - return nil -} - -// Flushes the file's buffer. -func (fh *FileHandler) Flush(ctx context.Context, req *fuse.FlushRequest) error { - fh.mu.Lock() - defer fh.mu.Unlock() - - return fh.mfsFD.Flush() -} - -// Closes the file. -func (fh *FileHandler) Release(ctx context.Context, req *fuse.ReleaseRequest) error { - fh.mu.Lock() - defer fh.mu.Unlock() - - return fh.mfsFD.Close() -} - -// Create new filesystem. -func NewFileSystem(ipfs *core.IpfsNode) fs.FS { - return &FileSystem{ - root: Dir{ - mfsDir: ipfs.FilesRoot.GetDirectory(), - }, - } -} - -// Check that our structs implement all the interfaces we want. -type mfsDir interface { - fs.Node - fs.NodeGetxattrer - fs.NodeListxattrer - fs.HandleReadDirAller - fs.NodeRequestLookuper - fs.NodeMkdirer - fs.NodeRenamer - fs.NodeRemover - fs.NodeCreater -} - -var _ mfsDir = (*Dir)(nil) - -type mfsFile interface { - fs.Node - fs.NodeGetxattrer - fs.NodeListxattrer - fs.NodeOpener - fs.NodeFsyncer -} - -var _ mfsFile = (*File)(nil) - -type mfsHandler interface { - fs.Handle - fs.HandleReader - fs.HandleWriter - fs.HandleFlusher - fs.HandleReleaser -} - -var _ mfsHandler = (*FileHandler)(nil) diff --git a/fuse/mfs/mount_unix.go b/fuse/mfs/mount_unix.go index 92e0845bc36..6ea7ed89b4c 100644 --- a/fuse/mfs/mount_unix.go +++ b/fuse/mfs/mount_unix.go @@ -1,19 +1,44 @@ -//go:build (linux || darwin || freebsd || netbsd || openbsd) && !nofuse +// Mount/unmount helpers for the /mfs FUSE mount. go-fuse only builds on linux, darwin, and freebsd. 
+//go:build (linux || darwin || freebsd) && !nofuse package mfs import ( + "os" + "time" + + "github.com/hanwen/go-fuse/v2/fs" + "github.com/hanwen/go-fuse/v2/fuse" + "github.com/ipfs/kubo/config" core "github.com/ipfs/kubo/core" - mount "github.com/ipfs/kubo/fuse/mount" + fusemnt "github.com/ipfs/kubo/fuse/mount" ) +// How long the kernel caches Lookup and Getattr results. 1 second +// matches the go-fuse default and what gocryptfs/rclone use. +// var (not const) because fs.Options needs a *time.Duration. +var mutableCacheTime = time.Second + // Mount mounts MFS at a given location, and returns a mount.Mount instance. -func Mount(ipfs *core.IpfsNode, mountpoint string) (mount.Mount, error) { +func Mount(ipfs *core.IpfsNode, mountpoint string) (fusemnt.Mount, error) { cfg, err := ipfs.Repo.Config() if err != nil { return nil, err } - allowOther := cfg.Mounts.FuseAllowOther - fsys := NewFileSystem(ipfs) - return mount.NewMount(fsys, mountpoint, allowOther) + root := NewFileSystem(ipfs, cfg.Mounts, cfg.Import) + opts := &fs.Options{ + NullPermissions: true, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + EntryTimeout: &mutableCacheTime, + AttrTimeout: &mutableCacheTime, + MountOptions: fuse.MountOptions{ + AllowOther: cfg.Mounts.FuseAllowOther.WithDefault(config.DefaultFuseAllowOther), + FsName: "mfs", + MaxReadAhead: fusemnt.MaxReadAhead, + Debug: os.Getenv("IPFS_FUSE_DEBUG") != "", + ExtraCapabilities: fusemnt.WritableMountCapabilities, + }, + } + return fusemnt.NewMount(root, mountpoint, opts) } diff --git a/fuse/mount/caps.go b/fuse/mount/caps.go new file mode 100644 index 00000000000..07244484ac8 --- /dev/null +++ b/fuse/mount/caps.go @@ -0,0 +1,17 @@ +// FUSE mount capabilities. go-fuse only builds on linux, darwin, and freebsd. +//go:build (linux || darwin || freebsd) && !nofuse + +package mount + +import "github.com/hanwen/go-fuse/v2/fuse" + +// WritableMountCapabilities are FUSE capabilities requested for writable +// mounts (/ipns, /mfs). 
+// +// CAP_ATOMIC_O_TRUNC tells the kernel to pass O_TRUNC to Open instead of +// sending a separate SETATTR(size=0) before Open. Without this, the kernel +// does SETATTR first, which requires opening a write descriptor inside +// Setattr. MFS only allows one write descriptor at a time, so that +// deadlocks. With this capability, O_TRUNC is handled inside Open where +// we already hold the descriptor. +const WritableMountCapabilities = fuse.CAP_ATOMIC_O_TRUNC diff --git a/fuse/mount/errno.go b/fuse/mount/errno.go new file mode 100644 index 00000000000..8591ecf0f0c --- /dev/null +++ b/fuse/mount/errno.go @@ -0,0 +1,27 @@ +// FUSE error mapping helpers. go-fuse only builds on linux, darwin, and freebsd. +//go:build (linux || darwin || freebsd) && !nofuse + +package mount + +import ( + "context" + "syscall" + + "github.com/hanwen/go-fuse/v2/fs" +) + +// ReadErrno maps an error from a context-aware read or write to a FUSE +// errno. It exists so context cancellation surfaces as EINTR rather than +// the unspecified code that fs.ToErrno produces for context.Canceled. +// +// The kernel sends FUSE_INTERRUPT when a userspace process is killed +// mid-syscall (Ctrl-C, SIGKILL on a stuck `cat`). go-fuse cancels the +// per-request context in response. Returning EINTR tells the kernel to +// abort the syscall with the right errno; without this, fs.ToErrno +// turns context.Canceled into something the caller can't act on. +func ReadErrno(err error) syscall.Errno { + if err == context.Canceled || err == context.DeadlineExceeded { + return syscall.EINTR + } + return fs.ToErrno(err) +} diff --git a/fuse/mount/fuse.go b/fuse/mount/fuse.go index 313c4af6a30..75cf384877f 100644 --- a/fuse/mount/fuse.go +++ b/fuse/mount/fuse.go @@ -1,4 +1,5 @@ -//go:build !nofuse && !windows && !openbsd && !netbsd && !plan9 +// FUSE mount/unmount lifecycle. go-fuse only builds on linux, darwin, and freebsd. 
+//go:build (linux || darwin || freebsd) && !nofuse package mount @@ -6,19 +7,17 @@ import ( "errors" "fmt" "sync" - "time" - "bazil.org/fuse" - "bazil.org/fuse/fs" + "github.com/hanwen/go-fuse/v2/fs" + "github.com/hanwen/go-fuse/v2/fuse" ) var ErrNotMounted = errors.New("not mounted") // mount implements go-ipfs/fuse/mount. type mount struct { - mpoint string - filesys fs.FS - fuseConn *fuse.Conn + mpoint string + server *fuse.Server active bool activeLock *sync.RWMutex @@ -26,101 +25,43 @@ type mount struct { unmountOnce sync.Once } -// Mount mounts a fuse fs.FS at a given location, and returns a Mount instance. -// ctx is parent is a ContextGroup to bind the mount's ContextGroup to. -func NewMount(fsys fs.FS, mountpoint string, allowOther bool) (Mount, error) { - var conn *fuse.Conn - var err error - - mountOpts := []fuse.MountOption{ - fuse.MaxReadahead(64 * 1024 * 1024), - fuse.AsyncRead(), - } - - if allowOther { - mountOpts = append(mountOpts, fuse.AllowOther()) - } - conn, err = fuse.Mount(mountpoint, mountOpts...) - +// NewMount mounts a FUSE filesystem at a given location, and returns a Mount instance. +func NewMount(root fs.InodeEmbedder, mountpoint string, opts *fs.Options) (Mount, error) { + PlatformMountOpts(&opts.MountOptions) + server, err := fs.Mount(mountpoint, root, opts) if err != nil { - return nil, err + return nil, fmt.Errorf("mounting %s: %w", mountpoint, err) } m := &mount{ mpoint: mountpoint, - fuseConn: conn, - filesys: fsys, - active: false, + server: server, + active: true, activeLock: &sync.RWMutex{}, } - // launch the mounting process. - if err = m.mount(); err != nil { - _ = m.Unmount() // just in case. - return nil, err - } - - return m, nil -} - -func (m *mount) mount() error { - log.Infof("Mounting %s", m.MountPoint()) - - errs := make(chan error, 1) + // Detect external unmount (e.g. fusermount -u) so IsActive + // returns false and Unmount returns ErrNotMounted. go func() { - // fs.Serve blocks until the filesystem is unmounted. 
- err := fs.Serve(m.fuseConn, m.filesys) - log.Debugf("%s is unmounted", m.MountPoint()) - if err != nil { - log.Debugf("fs.Serve returned (%s)", err) - errs <- err - } + server.Wait() m.setActive(false) }() - // wait for the mount process to be done, or timed out. - select { - case <-time.After(MountTimeout): - return fmt.Errorf("mounting %s timed out", m.MountPoint()) - case err := <-errs: - return err - case <-m.fuseConn.Ready: - } - - // check if the mount process has an error to report - if err := m.fuseConn.MountError; err != nil { - return err - } - - m.setActive(true) - - log.Infof("Mounted %s", m.MountPoint()) - return nil + log.Infof("Mounted %s", mountpoint) + return m, nil } // unmount is called exactly once to unmount this service. -// note that closing the connection will not always unmount -// properly. If that happens, we bring out the big guns -// (mount.ForceUnmountManyTimes, exec unmount). func (m *mount) unmount() error { log.Infof("Unmounting %s", m.MountPoint()) - // try unmounting with fuse lib - err := fuse.Unmount(m.MountPoint()) + err := m.server.Unmount() if err == nil { m.setActive(false) return nil } log.Warnf("fuse unmount err: %s", err) - // try closing the fuseConn - err = m.fuseConn.Close() - if err == nil { - m.setActive(false) - return nil - } - log.Warnf("fuse conn error: %s", err) - // try mount.ForceUnmountManyTimes if err := ForceUnmountManyTimes(m, 10); err != nil { return err diff --git a/fuse/mount/mode.go b/fuse/mount/mode.go new file mode 100644 index 00000000000..e91b3fea9f5 --- /dev/null +++ b/fuse/mount/mode.go @@ -0,0 +1,50 @@ +package mount + +import "os" + +// Default POSIX modes used by FUSE mounts when the UnixFS DAG node does +// not contain explicit permission metadata. Most data on IPFS does not +// include mode, so these apply to the majority of files and directories. +// +// Per the UnixFS spec, implementations may default to 0755 for directories +// and 0644 for files when mode is absent. 
+// See https://specs.ipfs.tech/unixfs/#dag-pb-optional-metadata + +// Writable mounts (/ipns, /mfs): standard POSIX defaults matching umask 022. +const ( + DefaultFileModeRW = os.FileMode(0o644) + DefaultDirModeRW = os.ModeDir | 0o755 +) + +// Read-only mount (/ipfs): no write bits. +const ( + DefaultFileModeRO = os.FileMode(0o444) + DefaultDirModeRO = os.ModeDir | 0o555 +) + +// NamespaceRootMode is for the /ipfs/ and /ipns/ root directories. +// Execute-only: these are virtual namespaces where users traverse by +// name (CID or IPNS key) but listing the full namespace is not possible. +const NamespaceRootMode = os.ModeDir | 0o111 + +// SymlinkMode is the POSIX permission bits for symlinks. Symlink +// permissions are always 0777; access control uses the target's mode. +const SymlinkMode = os.FileMode(0o777) + +// MaxReadAhead tells the kernel how far ahead to read in a single FUSE +// request. 64 MiB works well for sequential access (streaming, file +// copies) because most data is served from the local blockstore after +// the initial fetch. Network-backed reads are already chunked by the +// DAG layer, so oversized readahead does not cause extra round-trips. +const MaxReadAhead = 64 * 1024 * 1024 + +// XattrCID is the extended attribute name for the node's CID. +// Follows the convention used by CephFS (ceph.*), Btrfs (btrfs.*), +// and GlusterFS (glusterfs.*) of using a project-specific namespace. +const XattrCID = "ipfs.cid" + +// XattrCIDDeprecated is the old xattr name. Getxattr normalizes it +// to XattrCID and logs a deprecation error so existing tooling keeps +// working while users migrate. +// TODO: remove after 2 releases. 
+const XattrCIDDeprecated = "ipfs_cid" diff --git a/fuse/mount/mount.go b/fuse/mount/mount.go index 0c42ca26a8b..708ca423493 100644 --- a/fuse/mount/mount.go +++ b/fuse/mount/mount.go @@ -66,6 +66,9 @@ func UnmountCmd(point string) (*exec.Cmd, error) { case "darwin": return exec.Command("diskutil", "umount", "force", point), nil case "linux": + if _, err := exec.LookPath("fusermount3"); err == nil { + return exec.Command("fusermount3", "-u", point), nil + } return exec.Command("fusermount", "-u", point), nil default: return nil, fmt.Errorf("unmount: unimplemented") diff --git a/fuse/mount/opts_darwin.go b/fuse/mount/opts_darwin.go new file mode 100644 index 00000000000..1e99efab0bb --- /dev/null +++ b/fuse/mount/opts_darwin.go @@ -0,0 +1,26 @@ +//go:build darwin && !nofuse + +package mount + +import "github.com/hanwen/go-fuse/v2/fuse" + +// PlatformMountOpts applies macOS-specific FUSE mount options. +func PlatformMountOpts(opts *fuse.MountOptions) { + // volname: Finder shows this instead of the generic "macfuse Volume 0". + if opts.FsName != "" { + opts.Options = append(opts.Options, "volname="+opts.FsName) + } + + // noapplexattr: prevents Finder from probing com.apple.FinderInfo, + // com.apple.ResourceFork, and other Apple-private xattrs on every + // file access. Without this, each stat triggers multiple Getxattr + // calls that all return ENOATTR, adding latency on network-backed + // mounts. + opts.Options = append(opts.Options, "noapplexattr") + + // noappledouble: prevents macOS from creating ._ resource fork + // sidecar files when copying or editing files on the mount. These + // AppleDouble files pollute the DAG with metadata that only macOS + // understands and inflate the CID tree. 
+ opts.Options = append(opts.Options, "noappledouble") +} diff --git a/fuse/mount/opts_other.go b/fuse/mount/opts_other.go new file mode 100644 index 00000000000..e8e382fd1c7 --- /dev/null +++ b/fuse/mount/opts_other.go @@ -0,0 +1,8 @@ +//go:build (linux || freebsd) && !nofuse + +package mount + +import "github.com/hanwen/go-fuse/v2/fuse" + +// PlatformMountOpts is a no-op on Linux and FreeBSD. +func PlatformMountOpts(_ *fuse.MountOptions) {} diff --git a/fuse/mount/stat.go b/fuse/mount/stat.go new file mode 100644 index 00000000000..e5dc7e670d4 --- /dev/null +++ b/fuse/mount/stat.go @@ -0,0 +1,53 @@ +// FUSE stat helpers. go-fuse only builds on linux, darwin, and freebsd. +//go:build (linux || darwin || freebsd) && !nofuse + +package mount + +import ( + "strconv" + "strings" + + "github.com/hanwen/go-fuse/v2/fuse" +) + +// StatBlockSize is the POSIX stat(2) block unit. The st_blocks field +// reports allocation in 512-byte units regardless of the filesystem's +// real block size (see `man 2 stat`). Tools like `du`, `ls -s`, and +// `find -size` multiply st_blocks by this constant to compute bytes. +const StatBlockSize = 512 + +// DefaultBlksize is the preferred I/O size (stat.st_blksize) FUSE mounts +// advertise when no chunker-derived value applies (readonly /ipfs, or +// writable /mfs with a rabin/buzhash chunker). Larger hints let tools +// like cp, dd, and rsync use bigger buffers, amortizing FUSE syscall and +// DAG-walk overhead. 1 MiB matches the chunk size of Kubo's +// cross-implementation CID-deterministic import profile (IPIP-499). +// Hardcoded instead of tracking boxo's chunker default so the stat(2) +// contract stays stable across Kubo and boxo upgrades. +const DefaultBlksize = 1024 * 1024 + +// SizeToStatBlocks converts a byte size to the number of 512-byte blocks +// reported by POSIX stat(2) in the st_blocks field, rounded up so a +// non-empty file reports at least one block. 
+func SizeToStatBlocks(size uint64) uint64 { + return (size + StatBlockSize - 1) / StatBlockSize +} + +// BlksizeFromChunker derives the preferred I/O size hint for the writable +// mounts from the user's Import.UnixFSChunker setting. It extracts the +// byte count from `size-` and returns DefaultBlksize for rabin, +// buzhash, or malformed values (where there is no single preferred size). +// Values are clamped to fuse.MAX_KERNEL_WRITE because the kernel splits +// any larger userspace read/write into MAX_KERNEL_WRITE-sized FUSE ops +// regardless, so hinting past the ceiling just wastes userspace buffers. +func BlksizeFromChunker(chunkerStr string) uint32 { + if sizeStr, ok := strings.CutPrefix(chunkerStr, "size-"); ok { + if size, err := strconv.ParseUint(sizeStr, 10, 64); err == nil && size > 0 { + if size > fuse.MAX_KERNEL_WRITE { + return fuse.MAX_KERNEL_WRITE + } + return uint32(size) + } + } + return DefaultBlksize +} diff --git a/fuse/mount/stat_test.go b/fuse/mount/stat_test.go new file mode 100644 index 00000000000..91d60729f29 --- /dev/null +++ b/fuse/mount/stat_test.go @@ -0,0 +1,60 @@ +//go:build (linux || darwin || freebsd) && !nofuse + +package mount + +import ( + "testing" + + "github.com/hanwen/go-fuse/v2/fuse" +) + +// TestDefaultBlksizeAnchor pins DefaultBlksize to 1 MiB so a silent +// refactor cannot drift the value FUSE mounts advertise to tools. +// See stat.go for the rationale (CID-deterministic profile alignment). +func TestDefaultBlksizeAnchor(t *testing.T) { + if DefaultBlksize != 1024*1024 { + t.Fatalf("DefaultBlksize = %d, want 1 MiB (%d)", DefaultBlksize, 1024*1024) + } +} + +func TestBlksizeFromChunker(t *testing.T) { + tests := []struct { + name string + chunker string + want uint32 + }{ + // Kubo defaults and common user choices. 
+ {"default chunker", "size-262144", 262144}, + {"CID-deterministic profile", "size-1048576", 1024 * 1024}, + {"small custom", "size-65536", 65536}, + + // Non-size chunkers: fall back to DefaultBlksize because no + // single preferred I/O size describes their variable output. + {"rabin", "rabin", DefaultBlksize}, + {"rabin with params", "rabin-512-1024-2048", DefaultBlksize}, + {"buzhash", "buzhash", DefaultBlksize}, + + // Defensive: malformed or empty input must not panic or return + // a surprising value. + {"empty", "", DefaultBlksize}, + {"size prefix only", "size-", DefaultBlksize}, + {"non-numeric size", "size-abc", DefaultBlksize}, + {"zero size", "size-0", DefaultBlksize}, + + // Clamp: values above fuse.MAX_KERNEL_WRITE (the largest single FUSE + // request the kernel delivers) are capped so tools can't be + // tricked into allocating buffers the kernel will just split. + {"above cap clamped", "size-2097152", fuse.MAX_KERNEL_WRITE}, + {"16 MiB clamped", "size-16777216", fuse.MAX_KERNEL_WRITE}, + {"uint32 max clamped", "size-4294967295", fuse.MAX_KERNEL_WRITE}, + {"beyond uint32 clamped", "size-99999999999", fuse.MAX_KERNEL_WRITE}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + if got := BlksizeFromChunker(tc.chunker); got != tc.want { + t.Fatalf("BlksizeFromChunker(%q) = %d, want %d", tc.chunker, got, tc.want) + } + }) + } +} diff --git a/fuse/node/mount_darwin.go b/fuse/node/mount_darwin.go index 57fbe4d901e..8cc39eceeed 100644 --- a/fuse/node/mount_darwin.go +++ b/fuse/node/mount_darwin.go @@ -1,248 +1,36 @@ -//go:build !nofuse && darwin +// macFUSE/OSXFUSE availability check. Darwin only. +//go:build darwin && !nofuse package node import ( - "bytes" "fmt" - "os/exec" - "runtime" - "strings" + "os" core "github.com/ipfs/kubo/core" - - "github.com/blang/semver/v4" - unix "golang.org/x/sys/unix" ) func init() { - // this is a hack, but until we need to do it another way, this works. 
- platformFuseChecks = darwinFuseCheckVersion + platformFuseChecks = darwinFuseCheck } -// dontCheckOSXFUSEConfigKey is a key used to let the user tell us to -// skip fuse checks. -const dontCheckOSXFUSEConfigKey = "DontCheckOSXFUSE" - -// fuseVersionPkg is the go pkg url for fuse-version. -const fuseVersionPkg = "github.com/jbenet/go-fuse-version/fuse-version" - -// errStrFuseRequired is returned when we're sure the user does not have fuse. -const errStrFuseRequired = `OSXFUSE not found. - -OSXFUSE is required to mount, please install it. -NOTE: Version 2.7.2 or higher required; prior versions are known to kernel panic! -It is recommended you install it from the OSXFUSE website: - - http://osxfuse.github.io/ - -For more help, see: - - https://github.com/ipfs/kubo/issues/177 -` - -// errStrNoFuseHeaders is included in the output of `go get ` if there -// are no fuse headers. this means they don't have OSXFUSE installed. -var errStrNoFuseHeaders = "no such file or directory: '/usr/local/lib/libosxfuse.dylib'" - -var errStrUpgradeFuse = `OSXFUSE version %s not supported. - -OSXFUSE versions <2.7.2 are known to cause kernel panics! -Please upgrade to the latest OSXFUSE version. -It is recommended you install it from the OSXFUSE website: - - http://osxfuse.github.io/ - -For more help, see: - - https://github.com/ipfs/kubo/issues/177 -` - -type errNeedFuseVersion struct { - cause string -} - -func (me errNeedFuseVersion) Error() string { - return fmt.Sprintf(`unable to check fuse version. - -Dear User, - -Before mounting, we must check your version of OSXFUSE. We are protecting -you from a nasty kernel panic we found in OSXFUSE versions <2.7.2.[1]. To -make matters worse, it's harder than it should be to check whether you have -the right version installed...[2]. We've automated the process with the -help of a little tool. We tried to install it, but something went wrong[3]. 
-Please install it yourself by running: - - go get %s - -You can also stop ipfs from running these checks and use whatever OSXFUSE -version you have by running: - - ipfs config --bool %s true - -[1]: https://github.com/ipfs/kubo/issues/177 -[2]: https://github.com/ipfs/kubo/pull/533 -[3]: %s -`, fuseVersionPkg, dontCheckOSXFUSEConfigKey, me.cause) +// macFUSE mount helper paths, checked in the same order as go-fuse. +var macFUSEPaths = []string{ + "/Library/Filesystems/macfuse.fs/Contents/Resources/mount_macfuse", + "/Library/Filesystems/osxfuse.fs/Contents/Resources/mount_osxfuse", } -var errStrFailedToRunFuseVersion = `unable to check fuse version. - -Dear User, - -Before mounting, we must check your version of OSXFUSE. We are protecting -you from a nasty kernel panic we found in OSXFUSE versions <2.7.2.[1]. To -make matters worse, it's harder than it should be to check whether you have -the right version installed...[2]. We've automated the process with the -help of a little tool. We tried to run it, but something went wrong[3]. -Please, try to run it yourself with: - - go get %s - fuse-version - -You should see something like this: - - > fuse-version - fuse-version -only agent - OSXFUSE.AgentVersion: 2.7.3 - -Just make sure the number is 2.7.2 or higher. You can then stop ipfs from -trying to run these checks with: - - ipfs config --bool %s true - -[1]: https://github.com/ipfs/kubo/issues/177 -[2]: https://github.com/ipfs/kubo/pull/533 -[3]: %s -` - -var errStrFixConfig = `config key invalid: %s %v -You may be able to get this error to go away by setting it again: - - ipfs config --bool %s true - -Either way, please tell us at: http://github.com/ipfs/kubo/issues -` - -func darwinFuseCheckVersion(node *core.IpfsNode) error { - // on OSX, check FUSE version. - if runtime.GOOS != "darwin" { - return nil - } - - ov, errGFV := tryGFV() - if errGFV != nil { - // if we failed AND the user has told us to ignore the check we - // continue. 
this is in case fuse-version breaks or the user cannot - // install it, but is sure their fuse version will work. - if skip, err := userAskedToSkipFuseCheck(node); err != nil { - return err - } else if skip { - return nil // user told us not to check version... ok.... +func darwinFuseCheck(_ *core.IpfsNode) error { + for _, p := range macFUSEPaths { + if _, err := os.Stat(p); err == nil { + return nil } - return errGFV - } - - log.Debug("mount: osxfuse version:", ov) - - min := semver.MustParse("2.7.2") - curr, err := semver.Make(ov) - if err != nil { - return err } + return fmt.Errorf(`macFUSE not found. - if curr.LT(min) { - return fmt.Errorf(errStrUpgradeFuse, ov) - } - return nil -} - -func tryGFV() (string, error) { - // first try sysctl. it may work! - ov, err := trySysctl() - if err == nil { - return ov, nil - } - log.Debug(err) +macFUSE is required to mount FUSE filesystems on macOS. +Install it from https://osxfuse.github.io/ or via Homebrew: - return tryGFVFromFuseVersion() -} - -func trySysctl() (string, error) { - v, err := unix.Sysctl("osxfuse.version.number") - if err != nil { - log.Debug("mount: sysctl osxfuse.version.number:", "failed") - return "", err - } - log.Debug("mount: sysctl osxfuse.version.number:", v) - return v, nil -} - -func tryGFVFromFuseVersion() (string, error) { - if err := ensureFuseVersionIsInstalled(); err != nil { - return "", err - } - - cmd := exec.Command("fuse-version", "-q", "-only", "agent", "-s", "OSXFUSE") - out := new(bytes.Buffer) - cmd.Stdout = out - if err := cmd.Run(); err != nil { - return "", fmt.Errorf(errStrFailedToRunFuseVersion, fuseVersionPkg, dontCheckOSXFUSEConfigKey, err) - } - - return out.String(), nil -} - -func ensureFuseVersionIsInstalled() error { - // see if fuse-version is there - if _, err := exec.LookPath("fuse-version"); err == nil { - return nil // got it! - } - - // try installing it... - log.Debug("fuse-version: no fuse-version. 
attempting to install.") - cmd := exec.Command("go", "install", "github.com/jbenet/go-fuse-version/fuse-version") - cmdout := new(bytes.Buffer) - cmd.Stdout = cmdout - cmd.Stderr = cmdout - if err := cmd.Run(); err != nil { - // Ok, install fuse-version failed. is it they don't have fuse? - cmdoutstr := cmdout.String() - if strings.Contains(cmdoutstr, errStrNoFuseHeaders) { - // yes! it is! they don't have fuse! - return fmt.Errorf(errStrFuseRequired) - } - - log.Debug("fuse-version: failed to install.") - s := err.Error() + "\n" + cmdoutstr - return errNeedFuseVersion{s} - } - - // ok, try again... - if _, err := exec.LookPath("fuse-version"); err != nil { - log.Debug("fuse-version: failed to install?") - return errNeedFuseVersion{err.Error()} - } - - log.Debug("fuse-version: install success") - return nil -} - -func userAskedToSkipFuseCheck(node *core.IpfsNode) (skip bool, err error) { - val, err := node.Repo.GetConfigKey(dontCheckOSXFUSEConfigKey) - if err != nil { - return false, nil // failed to get config value. don't skip check. - } - - switch val := val.(type) { - case string: - return val == "true", nil - case bool: - return val, nil - default: - // got config value, but it's invalid... don't skip check, ask the user to fix it... - return false, fmt.Errorf(errStrFixConfig, dontCheckOSXFUSEConfigKey, val, - dontCheckOSXFUSEConfigKey) - } + brew install macfuse +`) } diff --git a/fuse/node/mount_nofuse.go b/fuse/node/mount_nofuse.go index 026f002ff08..a33afc5d46f 100644 --- a/fuse/node/mount_nofuse.go +++ b/fuse/node/mount_nofuse.go @@ -1,3 +1,5 @@ +// Stub when built with "go build -tags nofuse". Excludes windows +// which never has FUSE support regardless of build tags. 
//go:build !windows && nofuse package node diff --git a/fuse/node/mount_notsupp.go b/fuse/node/mount_notsupp.go index d5f0d2cbe9c..08949b05be2 100644 --- a/fuse/node/mount_notsupp.go +++ b/fuse/node/mount_notsupp.go @@ -1,4 +1,7 @@ -//go:build (!nofuse && openbsd) || (!nofuse && netbsd) || (!nofuse && plan9) +// Stub for platforms where go-fuse does not compile but the user +// has not set the nofuse build tag. Returns a clear error instead +// of a build failure. See https://github.com/ipfs/kubo/issues/5334. +//go:build (openbsd || netbsd || plan9) && !nofuse package node diff --git a/fuse/node/mount_test.go b/fuse/node/mount_test.go index b296e7e95b5..986d83a6fd3 100644 --- a/fuse/node/mount_test.go +++ b/fuse/node/mount_test.go @@ -1,29 +1,21 @@ -//go:build !openbsd && !nofuse && !netbsd && !plan9 +// go-fuse only builds on linux, darwin, and freebsd. +//go:build (linux || darwin || freebsd) && !nofuse package node import ( - "context" "os" - "strings" "testing" "time" - "bazil.org/fuse" core "github.com/ipfs/kubo/core" + coremock "github.com/ipfs/kubo/core/mock" + "github.com/ipfs/kubo/fuse/fusetest" ipns "github.com/ipfs/kubo/fuse/ipns" mount "github.com/ipfs/kubo/fuse/mount" - - ci "github.com/libp2p/go-libp2p-testing/ci" ) -func maybeSkipFuseTests(t *testing.T) { - if ci.NoFuse() { - t.Skip("Skipping FUSE tests") - } -} - func mkdir(t *testing.T, path string) { err := os.Mkdir(path, os.ModeDir|os.ModePerm) if err != nil { @@ -31,62 +23,102 @@ func mkdir(t *testing.T, path string) { } } -// Test externally unmounting, then trying to unmount in code. +// TestExternalUnmount runs an external unmount on each of the three +// FUSE mounts (/ipfs, /ipns, /mfs) and confirms the corresponding +// Mount.IsActive flips to false and Unmount returns ErrNotMounted. +// This exercises the goroutine in fuse/mount/fuse.go that watches +// fuse.Server.Wait() to detect out-of-band unmounts. 
func TestExternalUnmount(t *testing.T) { - if testing.Short() { - t.SkipNow() - } - - // TODO: needed? - maybeSkipFuseTests(t) - - node, err := core.NewNode(context.Background(), &core.BuildCfg{}) - if err != nil { - t.Fatal(err) + fusetest.SkipUnlessFUSE(t) + + cases := []struct { + name string + target func(node *core.IpfsNode, paths mountPaths) (string, mount.Mount) + }{ + { + name: "ipfs", + target: func(node *core.IpfsNode, p mountPaths) (string, mount.Mount) { + return p.ipfs, node.Mounts.Ipfs + }, + }, + { + name: "ipns", + target: func(node *core.IpfsNode, p mountPaths) (string, mount.Mount) { + return p.ipns, node.Mounts.Ipns + }, + }, + { + name: "mfs", + target: func(node *core.IpfsNode, p mountPaths) (string, mount.Mount) { + return p.mfs, node.Mounts.Mfs + }, + }, } - err = ipns.InitializeKeyspace(node, node.PrivateKey) - if err != nil { - t.Fatal(err) + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + node, paths := setupAllMounts(t) + mountpoint, target := tc.target(node, paths) + + // Run shell command to externally unmount the directory. + cmd, err := mount.UnmountCmd(mountpoint) + if err != nil { + t.Fatal(err) + } + if err := cmd.Run(); err != nil { + t.Fatal(err) + } + + // The goroutine watching fuse.Server.Wait() needs a moment + // to observe the kernel-side unmount and flip IsActive. 
+ time.Sleep(100 * time.Millisecond) + + if target.IsActive() { + t.Fatal("mount should be inactive after external unmount") + } + if err := target.Unmount(); err != mount.ErrNotMounted { + t.Fatalf("expected ErrNotMounted, got %v", err) + } + }) } +} - // get the test dir paths (/tmp/TestExternalUnmount) - dir := t.TempDir() - - ipfsDir := dir + "/ipfs" - ipnsDir := dir + "/ipns" - mfsDir := dir + "/mfs" - mkdir(t, ipfsDir) - mkdir(t, ipnsDir) - mkdir(t, mfsDir) +type mountPaths struct { + ipfs, ipns, mfs string +} - err = Mount(node, ipfsDir, ipnsDir, mfsDir) - if err != nil { - if strings.Contains(err.Error(), "unable to check fuse version") || err == fuse.ErrOSXFUSENotFound { - t.Skip(err) - } - } +// setupAllMounts builds an IpfsNode and mounts all three FUSE filesystems +// under a fresh temp directory. Cleanup unmounts whatever is still active. +// +// The node is built via coremock.NewMockNode so it is online: doMount +// only mounts /ipns when node.IsOnline is true, and the test needs all +// three mounts populated. +func setupAllMounts(t *testing.T) (*core.IpfsNode, mountPaths) { + t.Helper() + node, err := coremock.NewMockNode() if err != nil { - t.Fatalf("error mounting: %v", err) + t.Fatal(err) } - - // Run shell command to externally unmount the directory - cmd, err := mount.UnmountCmd(ipfsDir) - if err != nil { + if err := ipns.InitializeKeyspace(node, node.PrivateKey); err != nil { t.Fatal(err) } - if err := cmd.Run(); err != nil { - t.Fatal(err) + dir := t.TempDir() + paths := mountPaths{ + ipfs: dir + "/ipfs", + ipns: dir + "/ipns", + mfs: dir + "/mfs", } + mkdir(t, paths.ipfs) + mkdir(t, paths.ipns) + mkdir(t, paths.mfs) - // TODO(noffle): it takes a moment for the goroutine that's running fs.Serve to be notified and do its cleanup. - time.Sleep(time.Millisecond * 100) + err = Mount(node, paths.ipfs, paths.ipns, paths.mfs) + fusetest.MountError(t, err) - // Attempt to unmount IPFS; it should unmount successfully. 
- err = node.Mounts.Ipfs.Unmount() - if err != mount.ErrNotMounted { - t.Fatal("Unmount should have failed") - } + t.Cleanup(func() { + Unmount(node) + }) + return node, paths } diff --git a/fuse/node/mount_unix.go b/fuse/node/mount_unix.go index aa8da944c55..e03fb2dc6e4 100644 --- a/fuse/node/mount_unix.go +++ b/fuse/node/mount_unix.go @@ -1,4 +1,5 @@ -//go:build !windows && !openbsd && !netbsd && !plan9 && !nofuse +// Mounts all three FUSE filesystems (/ipfs, /ipns, /mfs). go-fuse only builds on linux, darwin, and freebsd. +//go:build (linux || darwin || freebsd) && !nofuse package node @@ -26,7 +27,7 @@ const fuseNoDirectory = "fusermount: failed to access mountpoint" const fuseExitStatus1 = "fusermount: exit status 1" // platformFuseChecks can get overridden by arch-specific files -// to run fuse checks (like checking the OSXFUSE version). +// to run pre-mount checks (e.g. verifying macFUSE is installed). var platformFuseChecks = func(*core.IpfsNode) error { return nil } @@ -69,8 +70,8 @@ func doMount(node *core.IpfsNode, fsdir, nsdir, mfsdir string) error { fmtFuseErr := func(err error, mountpoint string) error { s := err.Error() if strings.Contains(s, fuseNoDirectory) { - s = strings.Replace(s, `fusermount: "fusermount:`, "", -1) - s = strings.Replace(s, `\n", exit status 1`, "", -1) + s = strings.ReplaceAll(s, `fusermount: "fusermount:`, "") + s = strings.ReplaceAll(s, `\n", exit status 1`, "") return errors.New(s) } if s == fuseExitStatus1 { diff --git a/fuse/readonly/ipfs_test.go b/fuse/readonly/ipfs_test.go index c4f8afa511b..a644ac711f1 100644 --- a/fuse/readonly/ipfs_test.go +++ b/fuse/readonly/ipfs_test.go @@ -1,4 +1,9 @@ -//go:build !nofuse && !openbsd && !netbsd && !plan9 +//go:build (linux || darwin || freebsd) && !nofuse + +// Unit tests for the read-only /ipfs FUSE mount. +// These test the filesystem implementation directly without a daemon. +// End-to-end tests that exercise mount/unmount through a real daemon +// live in test/cli/fuse/. 
package readonly @@ -13,30 +18,41 @@ import ( gopath "path" "strings" "sync" + "syscall" "testing" + "time" - "bazil.org/fuse" + "github.com/hanwen/go-fuse/v2/fs" + "github.com/hanwen/go-fuse/v2/fuse" core "github.com/ipfs/kubo/core" coreapi "github.com/ipfs/kubo/core/coreapi" coremock "github.com/ipfs/kubo/core/mock" - fstest "bazil.org/fuse/fs/fstestutil" chunker "github.com/ipfs/boxo/chunker" "github.com/ipfs/boxo/files" dag "github.com/ipfs/boxo/ipld/merkledag" + ft "github.com/ipfs/boxo/ipld/unixfs" importer "github.com/ipfs/boxo/ipld/unixfs/importer" uio "github.com/ipfs/boxo/ipld/unixfs/io" "github.com/ipfs/boxo/path" ipld "github.com/ipfs/go-ipld-format" "github.com/ipfs/go-test/random" - ci "github.com/libp2p/go-libp2p-testing/ci" + options "github.com/ipfs/kubo/core/coreiface/options" + "github.com/ipfs/kubo/fuse/fusetest" + fusemnt "github.com/ipfs/kubo/fuse/mount" + "github.com/stretchr/testify/require" ) -func maybeSkipFuseTests(t *testing.T) { - if ci.NoFuse() { - t.Skip("Skipping FUSE tests") - } +func testMount(t *testing.T, root fs.InodeEmbedder) string { + t.Helper() + return fusetest.TestMount(t, root, &fs.Options{ + AttrTimeout: &immutableAttrCacheTime, + EntryTimeout: &immutableAttrCacheTime, + MountOptions: fuse.MountOptions{ + MaxReadAhead: fusemnt.MaxReadAhead, + }, + }) } func randObj(t *testing.T, nd *core.IpfsNode, size int64) (ipld.Node, []byte) { @@ -54,9 +70,8 @@ func randObj(t *testing.T, nd *core.IpfsNode, size int64) (ipld.Node, []byte) { return obj, buf } -func setupIpfsTest(t *testing.T, node *core.IpfsNode) (*core.IpfsNode, *fstest.Mount) { +func setupIpfsTest(t *testing.T, node *core.IpfsNode) (*core.IpfsNode, string) { t.Helper() - maybeSkipFuseTests(t) var err error if node == nil { @@ -66,29 +81,149 @@ func setupIpfsTest(t *testing.T, node *core.IpfsNode) (*core.IpfsNode, *fstest.M } } - fs := NewFileSystem(node) - mnt, err := fstest.MountedT(t, fs, nil) - if err == fuse.ErrOSXFUSENotFound { - t.Skip(err) + root := 
NewRoot(node) + mntDir := testMount(t, root) + + return node, mntDir +} + +// Test that an empty directory can be listed without errors. +func TestEmptyDirListing(t *testing.T) { + nd, mntDir := setupIpfsTest(t, nil) + + // Create an empty UnixFS directory and add it to the DAG. + db, err := uio.NewDirectory(nd.DAG) + if err != nil { + t.Fatal(err) + } + emptyDir, err := db.GetNode() + if err != nil { + t.Fatal(err) + } + if err := nd.DAG.Add(nd.Context(), emptyDir); err != nil { + t.Fatal(err) + } + + // List it via FUSE. + dirPath := gopath.Join(mntDir, emptyDir.Cid().String()) + entries, err := os.ReadDir(dirPath) + if err != nil { + t.Fatal(err) + } + if len(entries) != 0 { + t.Fatalf("expected empty directory, got %d entries", len(entries)) + } +} + +// Test that a bare file CID can be read at the /ipfs mount root. +func TestBareFileCID(t *testing.T) { + nd, mntDir := setupIpfsTest(t, nil) + + api, err := coreapi.NewCoreAPI(nd) + if err != nil { + t.Fatal(err) + } + + content := []byte("bare file CID test content") + + t.Run("CIDv0", func(t *testing.T) { + resolved, err := api.Unixfs().Add(t.Context(), + files.NewBytesFile(content), + options.Unixfs.CidVersion(0), + options.Unixfs.RawLeaves(false)) + if err != nil { + t.Fatal(err) + } + cidStr := resolved.RootCid().String() + got, err := os.ReadFile(gopath.Join(mntDir, cidStr)) + if err != nil { + t.Fatalf("read %s via FUSE: %v", cidStr, err) + } + if !bytes.Equal(got, content) { + t.Fatalf("content mismatch: got %d bytes, want %d", len(got), len(content)) + } + }) + + t.Run("CIDv1", func(t *testing.T) { + resolved, err := api.Unixfs().Add(t.Context(), + files.NewBytesFile(content), + options.Unixfs.CidVersion(1), + options.Unixfs.RawLeaves(true)) + if err != nil { + t.Fatal(err) + } + cidStr := resolved.RootCid().String() + got, err := os.ReadFile(gopath.Join(mntDir, cidStr)) + if err != nil { + t.Fatalf("read %s via FUSE: %v", cidStr, err) + } + if !bytes.Equal(got, content) { + t.Fatalf("content mismatch: 
got %d bytes, want %d", len(got), len(content)) + } + }) +} + +// Test reading a directory that contains both dag-pb and raw-leaf children. +// This is the typical layout produced by `ipfs add --raw-leaves`: the +// directory node is dag-pb, while file leaves are raw blocks. +func TestMixedDAGDirectory(t *testing.T) { + nd, mntDir := setupIpfsTest(t, nil) + + api, err := coreapi.NewCoreAPI(nd) + if err != nil { + t.Fatal(err) + } + + fileA := []byte("file in dag-pb leaf") + fileB := []byte("file in raw leaf") + + dir := files.NewMapDirectory(map[string]files.Node{ + "dagpb.txt": files.NewBytesFile(fileA), + "raw.txt": files.NewBytesFile(fileB), + }) + + // CIDv1 with raw leaves: directory is dag-pb, file leaves are raw. + resolved, err := api.Unixfs().Add(t.Context(), dir, + options.Unixfs.CidVersion(1), + options.Unixfs.RawLeaves(true)) + if err != nil { + t.Fatal(err) } + + dirPath := gopath.Join(mntDir, resolved.RootCid().String()) + + entries, err := os.ReadDir(dirPath) if err != nil { - t.Fatalf("error mounting temporary directory: %v", err) + t.Fatal(err) + } + if len(entries) != 2 { + t.Fatalf("expected 2 entries, got %d", len(entries)) } - return node, mnt + for _, tc := range []struct { + name string + want []byte + }{ + {"dagpb.txt", fileA}, + {"raw.txt", fileB}, + } { + got, err := os.ReadFile(gopath.Join(dirPath, tc.name)) + if err != nil { + t.Fatalf("read %s: %v", tc.name, err) + } + if !bytes.Equal(got, tc.want) { + t.Fatalf("%s: content mismatch: got %d bytes, want %d", tc.name, len(got), len(tc.want)) + } + } } // Test writing an object and reading it back through fuse. 
func TestIpfsBasicRead(t *testing.T) { - if testing.Short() { - t.SkipNow() - } - nd, mnt := setupIpfsTest(t, nil) - defer mnt.Close() + nd, mntDir := setupIpfsTest(t, nil) fi, data := randObj(t, nd, 10000) k := fi.Cid() - fname := gopath.Join(mnt.Dir, k.String()) + fname := gopath.Join(mntDir, k.String()) rbuf, err := os.ReadFile(fname) if err != nil { t.Fatal(err) @@ -123,11 +258,7 @@ func getPaths(t *testing.T, ipfs *core.IpfsNode, name string, n *dag.ProtoNode) // Perform a large number of concurrent reads to stress the system. func TestIpfsStressRead(t *testing.T) { - if testing.Short() { - t.SkipNow() - } - nd, mnt := setupIpfsTest(t, nil) - defer mnt.Close() + nd, mntDir := setupIpfsTest(t, nil) api, err := coreapi.NewCoreAPI(nd) if err != nil { @@ -191,11 +322,12 @@ func TestIpfsStressRead(t *testing.T) { } relpath := strings.Replace(item.String(), item.Namespace(), "", 1) - fname := gopath.Join(mnt.Dir, relpath) + fname := gopath.Join(mntDir, relpath) rbuf, err := os.ReadFile(fname) if err != nil { errs <- err + continue } // nd.Context() is never closed which leads to @@ -204,12 +336,16 @@ func TestIpfsStressRead(t *testing.T) { read, err := api.Unixfs().Get(ctx, item) if err != nil { + cancelFunc() errs <- err + continue } data, err := io.ReadAll(read.(files.File)) if err != nil { + cancelFunc() errs <- err + continue } cancelFunc() @@ -235,11 +371,7 @@ func TestIpfsStressRead(t *testing.T) { // Test writing a file and reading it back. 
func TestIpfsBasicDirRead(t *testing.T) { - if testing.Short() { - t.SkipNow() - } - nd, mnt := setupIpfsTest(t, nil) - defer mnt.Close() + nd, mntDir := setupIpfsTest(t, nil) // Make a 'file' fi, data := randObj(t, nd, 10000) @@ -264,7 +396,7 @@ func TestIpfsBasicDirRead(t *testing.T) { t.Fatal(err) } - dirname := gopath.Join(mnt.Dir, d1nd.Cid().String()) + dirname := gopath.Join(mntDir, d1nd.Cid().String()) fname := gopath.Join(dirname, "actual") rbuf, err := os.ReadFile(fname) if err != nil { @@ -289,16 +421,12 @@ func TestIpfsBasicDirRead(t *testing.T) { // Test to make sure the filesystem reports file sizes correctly. func TestFileSizeReporting(t *testing.T) { - if testing.Short() { - t.SkipNow() - } - nd, mnt := setupIpfsTest(t, nil) - defer mnt.Close() + nd, mntDir := setupIpfsTest(t, nil) fi, data := randObj(t, nd, 10000) k := fi.Cid() - fname := gopath.Join(mnt.Dir, k.String()) + fname := gopath.Join(mntDir, k.String()) finfo, err := os.Stat(fname) if err != nil { @@ -309,3 +437,426 @@ func TestFileSizeReporting(t *testing.T) { t.Fatal("Read incorrect size from stat!") } } + +// Test that mode and mtime stored in UnixFS metadata are reported in stat. +func TestUnixFSMetadataInStat(t *testing.T) { + nd, mntDir := setupIpfsTest(t, nil) + + storedMode := os.FileMode(0o755) + storedMtime := time.Date(2025, 6, 15, 12, 0, 0, 0, time.UTC) + content := []byte("file with metadata") + + // Create a UnixFS node with explicit mode and mtime. 
+ pbdata := ft.FilePBDataWithStat(content, uint64(len(content)), storedMode, storedMtime) + node := dag.NodeWithData(pbdata) + if err := nd.DAG.Add(nd.Context(), node); err != nil { + t.Fatal(err) + } + + fpath := gopath.Join(mntDir, node.Cid().String()) + fi, err := os.Stat(fpath) + if err != nil { + t.Fatal(err) + } + + if fi.Mode().Perm() != storedMode.Perm() { + t.Fatalf("expected mode %04o, got %04o", storedMode.Perm(), fi.Mode().Perm()) + } + if !fi.ModTime().Equal(storedMtime) { + t.Fatalf("expected mtime %v, got %v", storedMtime, fi.ModTime()) + } +} + +// Test that files without UnixFS metadata get the read-only defaults. +func TestDefaultModeReadonly(t *testing.T) { + nd, mntDir := setupIpfsTest(t, nil) + + // Create a plain UnixFS file (no mode/mtime metadata). + fi, _ := randObj(t, nd, 100) + fpath := gopath.Join(mntDir, fi.Cid().String()) + + finfo, err := os.Stat(fpath) + if err != nil { + t.Fatal(err) + } + if finfo.Mode().Perm() != fusemnt.DefaultFileModeRO.Perm() { + t.Fatalf("expected default mode %04o, got %04o", fusemnt.DefaultFileModeRO.Perm(), finfo.Mode().Perm()) + } +} + +// Test that ipfs.cid xattr returns the correct CID for files and directories. 
+func TestXattrCID(t *testing.T) { + nd, _ := setupIpfsTest(t, nil) + + t.Run("file", func(t *testing.T) { + obj, _ := randObj(t, nd, 100) + node := &Node{ipfs: nd, nd: obj} + + dest := make([]byte, 256) + sz, errno := node.Listxattr(t.Context(), dest) + if errno != 0 { + t.Fatalf("Listxattr: %v", errno) + } + if !bytes.Contains(dest[:sz], []byte(fusemnt.XattrCID)) { + t.Fatal("ipfs.cid not listed") + } + + sz, errno = node.Getxattr(t.Context(), fusemnt.XattrCID, dest) + if errno != 0 { + t.Fatalf("Getxattr: %v", errno) + } + if string(dest[:sz]) != obj.Cid().String() { + t.Fatalf("expected CID %s, got %s", obj.Cid().String(), string(dest[:sz])) + } + }) + + t.Run("directory", func(t *testing.T) { + db, err := uio.NewDirectory(nd.DAG) + if err != nil { + t.Fatal(err) + } + dirNode, err := db.GetNode() + if err != nil { + t.Fatal(err) + } + if err := nd.DAG.Add(nd.Context(), dirNode); err != nil { + t.Fatal(err) + } + node := &Node{ipfs: nd, nd: dirNode} + + dest := make([]byte, 256) + sz, errno := node.Listxattr(t.Context(), dest) + if errno != 0 { + t.Fatalf("Listxattr: %v", errno) + } + if !bytes.Contains(dest[:sz], []byte(fusemnt.XattrCID)) { + t.Fatal("ipfs.cid not listed") + } + + sz, errno = node.Getxattr(t.Context(), fusemnt.XattrCID, dest) + if errno != 0 { + t.Fatalf("Getxattr: %v", errno) + } + if string(dest[:sz]) != dirNode.Cid().String() { + t.Fatalf("expected CID %s, got %s", dirNode.Cid().String(), string(dest[:sz])) + } + }) + +} + +// Test that symlinks in UnixFS are rendered via Readlink. +func TestReadlink(t *testing.T) { + nd, mntDir := setupIpfsTest(t, nil) + + // Build a directory containing a symlink. 
+ db, err := uio.NewDirectory(nd.DAG) + if err != nil { + t.Fatal(err) + } + + target := "hello.txt" + slData, err := ft.SymlinkData(target) + if err != nil { + t.Fatal(err) + } + symlinkNode := dag.NodeWithData(slData) + if err := nd.DAG.Add(nd.Context(), symlinkNode); err != nil { + t.Fatal(err) + } + if err := db.AddChild(nd.Context(), "link", symlinkNode); err != nil { + t.Fatal(err) + } + + dirNode, err := db.GetNode() + if err != nil { + t.Fatal(err) + } + if err := nd.DAG.Add(nd.Context(), dirNode); err != nil { + t.Fatal(err) + } + + linkPath := gopath.Join(mntDir, dirNode.Cid().String(), "link") + got, err := os.Readlink(linkPath) + if err != nil { + t.Fatal(err) + } + if got != target { + t.Fatalf("expected readlink %q, got %q", target, got) + } +} + +// Test that readdir reports symlinks with ModeSymlink so that +// tools like ls -l and find -type l see the correct file type. +func TestReaddirSymlink(t *testing.T) { + nd, mntDir := setupIpfsTest(t, nil) + + db, err := uio.NewDirectory(nd.DAG) + require.NoError(t, err) + + // Regular file child. + fileData := []byte("hello") + fileNode := dag.NodeWithData(ft.FilePBData(fileData, uint64(len(fileData)))) + require.NoError(t, nd.DAG.Add(nd.Context(), fileNode)) + require.NoError(t, db.AddChild(nd.Context(), "regular", fileNode)) + + // Symlink child. 
+ slData, err := ft.SymlinkData("hello") + require.NoError(t, err) + symlinkNode := dag.NodeWithData(slData) + require.NoError(t, nd.DAG.Add(nd.Context(), symlinkNode)) + require.NoError(t, db.AddChild(nd.Context(), "link", symlinkNode)) + + dirNode, err := db.GetNode() + require.NoError(t, err) + require.NoError(t, nd.DAG.Add(nd.Context(), dirNode)) + + entries, err := os.ReadDir(gopath.Join(mntDir, dirNode.Cid().String())) + require.NoError(t, err) + + found := false + for _, e := range entries { + if e.Name() == "link" { + require.NotZero(t, e.Type()&os.ModeSymlink, "readdir should report symlink type") + found = true + } + if e.Name() == "regular" { + require.Zero(t, e.Type()&os.ModeSymlink, "regular file should not have symlink type") + } + } + require.True(t, found, "symlink entry not found in readdir") +} + +// Test reading a slice from the middle of a file, skipping both +// the beginning and the end. +func TestSeekRead(t *testing.T) { + nd, mntDir := setupIpfsTest(t, nil) + + obj, data := randObj(t, nd, 10000) + fpath := gopath.Join(mntDir, obj.Cid().String()) + + f, err := os.Open(fpath) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + off := int64(3000) + readLen := 2000 + if _, err := f.Seek(off, io.SeekStart); err != nil { + t.Fatal(err) + } + + buf := make([]byte, readLen) + n, err := io.ReadFull(f, buf) + if err != nil { + t.Fatal(err) + } + if n != readLen { + t.Fatalf("short read: got %d, want %d", n, readLen) + } + if !bytes.Equal(buf, data[off:off+int64(readLen)]) { + t.Fatal("content mismatch for middle slice") + } +} + +// Test that concurrent reads of the same large file produce correct data. +// The kernel sends multiple Read requests concurrently via readahead; +// without a mutex on roFileHandle the DagReader's internal state +// corrupts, causing data mismatches or panics. 
+func TestConcurrentLargeFileRead(t *testing.T) { + nd, mntDir := setupIpfsTest(t, nil) + + // 1 MiB + 1 byte: large enough to span multiple DAG nodes and + // trigger concurrent kernel readahead requests. + fi, data := randObj(t, nd, 1024*1024+1) + fpath := gopath.Join(mntDir, fi.Cid().String()) + + // Multiple goroutines opening and reading the same file exercises + // both per-handle serialization (Seek+Read within one handle) and + // independent handle isolation (separate DagReaders). + var wg sync.WaitGroup + for range 8 { + wg.Go(func() { + got, err := os.ReadFile(fpath) + if err != nil { + t.Errorf("ReadFile: %v", err) + return + } + if !bytes.Equal(got, data) { + t.Errorf("data mismatch: got %d bytes, want %d", len(got), len(data)) + } + }) + } + wg.Wait() +} + +// blockingDagReader is a uio.DagReader that blocks in CtxReadFull until +// the supplied context is cancelled. Used to verify that roFileHandle +// propagates cancellation from FUSE down to the underlying reader. +type blockingDagReader struct { + entered chan struct{} // closed when CtxReadFull begins blocking +} + +func (b *blockingDagReader) CtxReadFull(ctx context.Context, _ []byte) (int, error) { + close(b.entered) + <-ctx.Done() + return 0, ctx.Err() +} + +// Stub uio.DagReader methods that the test does not exercise. Returning +// zero values keeps roFileHandle.Read on the CtxReadFull path. 
+func (*blockingDagReader) Seek(int64, int) (int64, error) { return 0, nil } +func (*blockingDagReader) Read([]byte) (int, error) { return 0, io.EOF } +func (*blockingDagReader) Close() error { return nil } +func (*blockingDagReader) WriteTo(io.Writer) (int64, error) { return 0, nil } +func (*blockingDagReader) Size() uint64 { return 0 } +func (*blockingDagReader) Mode() os.FileMode { return 0 } +func (*blockingDagReader) ModTime() time.Time { return time.Time{} } + +var _ uio.DagReader = (*blockingDagReader)(nil) + +// TestReadCancellationUnblocks confirms that cancelling the context +// passed to roFileHandle.Read returns promptly with EINTR. This guards +// the "killing a stuck cat works" fix: the kernel sends FUSE_INTERRUPT +// when a userspace process is killed mid-read, go-fuse cancels the +// per-request context, and the read handler must propagate cancellation +// down to the DagReader instead of blocking forever on a stuck fetch. +func TestReadCancellationUnblocks(t *testing.T) { + fake := &blockingDagReader{entered: make(chan struct{})} + fh := &roFileHandle{r: fake} + + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + + type result struct { + errno syscall.Errno + } + done := make(chan result, 1) + go func() { + buf := make([]byte, 4096) + _, errno := fh.Read(ctx, buf, 0) + done <- result{errno} + }() + + // Wait for the fake reader to actually block on ctx.Done() before + // cancelling, so the test exercises mid-read cancellation rather + // than racing the goroutine start. 
+ select { + case <-fake.entered: + case <-time.After(5 * time.Second): + t.Fatal("CtxReadFull never entered; cancellation path unreachable") + } + + cancel() // simulates FUSE_INTERRUPT from the kernel + + select { + case r := <-done: + if r.errno != syscall.EINTR { + t.Fatalf("expected EINTR after cancel, got errno %v", r.errno) + } + case <-time.After(5 * time.Second): + t.Fatal("roFileHandle.Read did not return after ctx cancel; cancellation is not propagated") + } +} + +// TestStatBlocks verifies that stat(2) on entries in /ipfs populates +// st_blocks (used by du and ls -s) consistent with the file size, and +// that st_blksize advertises the FUSE preferred I/O size. +func TestStatBlocks(t *testing.T) { + nd, mntDir := setupIpfsTest(t, nil) + + t.Run("multi-block file", func(t *testing.T) { + // >1 MiB spans several chunks, so the DAG has multiple leaf links. + fi, data := randObj(t, nd, 1024*1024+1) + require.Greater(t, len(data), 1024*1024) + fusetest.AssertStatBlocks(t, + gopath.Join(mntDir, fi.Cid().String()), + fusemnt.DefaultBlksize) + }) + + t.Run("small single-chunk file", func(t *testing.T) { + // <512 B fits in a single UnixFS chunk with no child links; + // st_blocks still rounds up to 1 so du reports at least 512 B. + fi, _ := randObj(t, nd, 100) + fusetest.AssertStatBlocks(t, + gopath.Join(mntDir, fi.Cid().String()), + fusemnt.DefaultBlksize) + }) + + t.Run("directory", func(t *testing.T) { + // du sums child leaves, so the directory's own st_blocks is not + // arithmetically meaningful. Report a nominal 1 block so tools + // that treat 0 as "unsupported" behave correctly. 
+ child, _ := randObj(t, nd, 100) + + db, err := uio.NewDirectory(nd.DAG) + require.NoError(t, err) + require.NoError(t, db.AddChild(nd.Context(), "f", child)) + dirNode, err := db.GetNode() + require.NoError(t, err) + require.NoError(t, nd.DAG.Add(nd.Context(), dirNode)) + + info, err := os.Stat(gopath.Join(mntDir, dirNode.Cid().String())) + require.NoError(t, err) + st, ok := info.Sys().(*syscall.Stat_t) + require.True(t, ok) + require.EqualValues(t, 1, st.Blocks, "directory should report 1 nominal block") + }) + + t.Run("symlink", func(t *testing.T) { + // UnixFS TSymlink node: Size is the target path length, Blocks + // rounds up to 1 so tools don't see a zero-block symlink. + const target = "hello.txt" + + slData, err := ft.SymlinkData(target) + require.NoError(t, err) + symNode := dag.NodeWithData(slData) + require.NoError(t, nd.DAG.Add(nd.Context(), symNode)) + + db, err := uio.NewDirectory(nd.DAG) + require.NoError(t, err) + require.NoError(t, db.AddChild(nd.Context(), "link", symNode)) + dirNode, err := db.GetNode() + require.NoError(t, err) + require.NoError(t, nd.DAG.Add(nd.Context(), dirNode)) + + linkPath := gopath.Join(mntDir, dirNode.Cid().String(), "link") + info, err := os.Lstat(linkPath) + require.NoError(t, err) + st, ok := info.Sys().(*syscall.Stat_t) + require.True(t, ok) + require.EqualValues(t, len(target), st.Size) + require.EqualValues(t, 1, st.Blocks) + require.EqualValues(t, fusemnt.DefaultBlksize, st.Blksize) + }) +} + +// TestStatfs verifies that statfs on the /ipfs mount reports the disk +// space of the repo's backing filesystem. macOS Finder refuses to copy +// files onto a volume that reports zero free space. +func TestStatfs(t *testing.T) { + nd, err := coremock.NewMockNode() + require.NoError(t, err) + + // Point repoPath at a real directory so Statfs has a valid target. + // (NewMockNode's in-memory repo returns "" for Path().) 
+ repoDir := t.TempDir() + root := &Root{ipfs: nd, repoPath: repoDir} + mntDir := testMount(t, root) + + fusetest.AssertStatfsNonZero(t, mntDir) +} + +// Test that getxattr on an unknown attribute returns ENODATA (Linux) / ENOATTR. +func TestUnknownXattr(t *testing.T) { + nd, _ := setupIpfsTest(t, nil) + + obj, _ := randObj(t, nd, 100) + node := &Node{ipfs: nd, nd: obj} + + dest := make([]byte, 256) + _, errno := node.Getxattr(t.Context(), "user.bogus", dest) + if errno == 0 { + t.Fatal("expected error for unknown xattr, got success") + } +} diff --git a/fuse/readonly/mount_unix.go b/fuse/readonly/mount_unix.go index 33565acd2a6..96bb6b7ac98 100644 --- a/fuse/readonly/mount_unix.go +++ b/fuse/readonly/mount_unix.go @@ -1,19 +1,37 @@ +// Mount/unmount helpers for the /ipfs FUSE mount. go-fuse only builds on linux, darwin, and freebsd. //go:build (linux || darwin || freebsd) && !nofuse package readonly import ( + "os" + + "github.com/hanwen/go-fuse/v2/fs" + "github.com/hanwen/go-fuse/v2/fuse" + "github.com/ipfs/kubo/config" core "github.com/ipfs/kubo/core" - mount "github.com/ipfs/kubo/fuse/mount" + fusemnt "github.com/ipfs/kubo/fuse/mount" ) // Mount mounts IPFS at a given location, and returns a mount.Mount instance. 
-func Mount(ipfs *core.IpfsNode, mountpoint string) (mount.Mount, error) { +func Mount(ipfs *core.IpfsNode, mountpoint string) (fusemnt.Mount, error) { cfg, err := ipfs.Repo.Config() if err != nil { return nil, err } - allowOther := cfg.Mounts.FuseAllowOther - fsys := NewFileSystem(ipfs) - return mount.NewMount(fsys, mountpoint, allowOther) + root := NewRoot(ipfs) + opts := &fs.Options{ + NullPermissions: true, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + AttrTimeout: &immutableAttrCacheTime, + EntryTimeout: &immutableAttrCacheTime, + MountOptions: fuse.MountOptions{ + AllowOther: cfg.Mounts.FuseAllowOther.WithDefault(config.DefaultFuseAllowOther), + FsName: "ipfs", + MaxReadAhead: fusemnt.MaxReadAhead, + Debug: os.Getenv("IPFS_FUSE_DEBUG") != "", + }, + } + return fusemnt.NewMount(root, mountpoint, opts) } diff --git a/fuse/readonly/readonly_unix.go b/fuse/readonly/readonly_unix.go index c042628403c..f2b8d72d0f7 100644 --- a/fuse/readonly/readonly_unix.go +++ b/fuse/readonly/readonly_unix.go @@ -1,16 +1,19 @@ +// FUSE filesystem for the read-only /ipfs mount. go-fuse only builds on linux, darwin, and freebsd. //go:build (linux || darwin || freebsd) && !nofuse package readonly import ( "context" - "fmt" "io" "os" + "sync" "syscall" + "time" - fuse "bazil.org/fuse" - fs "bazil.org/fuse/fs" + "github.com/hanwen/go-fuse/v2/fs" + "github.com/hanwen/go-fuse/v2/fuse" + "github.com/ipfs/boxo/files" mdag "github.com/ipfs/boxo/ipld/merkledag" ft "github.com/ipfs/boxo/ipld/unixfs" uio "github.com/ipfs/boxo/ipld/unixfs/io" @@ -19,220 +22,279 @@ import ( ipld "github.com/ipfs/go-ipld-format" logging "github.com/ipfs/go-log/v2" core "github.com/ipfs/kubo/core" - ipldprime "github.com/ipld/go-ipld-prime" + fusemnt "github.com/ipfs/kubo/fuse/mount" cidlink "github.com/ipld/go-ipld-prime/linking/cid" ) var log = logging.Logger("fuse/ipfs") -// FileSystem is the readonly IPFS Fuse Filesystem. 
-type FileSystem struct { - Ipfs *core.IpfsNode -} +// /ipfs paths are immutable (content-addressed by CID), so the kernel +// can cache attributes and directory entries for as long as it wants. +// var (not const) because fs.Options needs a *time.Duration. +var immutableAttrCacheTime = 365 * 24 * time.Hour -// NewFileSystem constructs new fs using given core.IpfsNode instance. -func NewFileSystem(ipfs *core.IpfsNode) *FileSystem { - return &FileSystem{Ipfs: ipfs} +// Root is the root object of the /ipfs filesystem tree. +type Root struct { + fs.Inode + ipfs *core.IpfsNode + repoPath string } -// Root constructs the Root of the filesystem, a Root object. -func (f FileSystem) Root() (fs.Node, error) { - return &Root{Ipfs: f.Ipfs}, nil +// NewRoot constructs a new readonly root node. +func NewRoot(ipfs *core.IpfsNode) *Root { + return &Root{ipfs: ipfs, repoPath: ipfs.Repo.Path()} } -// Root is the root object of the filesystem tree. -type Root struct { - Ipfs *core.IpfsNode +// Statfs reports disk-space statistics for the underlying filesystem. +// macOS Finder checks free space before copying; without this it +// reports "not enough free space" because go-fuse returns zeroed stats. +func (r *Root) Statfs(_ context.Context, out *fuse.StatfsOut) syscall.Errno { + if r.repoPath == "" { + return 0 + } + var s syscall.Statfs_t + if err := syscall.Statfs(r.repoPath, &s); err != nil { + return fs.ToErrno(err) + } + out.FromStatfsT(&s) + return 0 } -// Attr returns file attributes. -func (*Root) Attr(ctx context.Context, a *fuse.Attr) error { - a.Mode = os.ModeDir | 0o111 // -rw+x - return nil +func (*Root) Getattr(_ context.Context, _ fs.FileHandle, out *fuse.AttrOut) syscall.Errno { + out.Attr.Mode = uint32(fusemnt.NamespaceRootMode.Perm()) + out.SetTimeout(immutableAttrCacheTime) + return 0 } -// Lookup performs a lookup under this node. 
-func (s *Root) Lookup(ctx context.Context, name string) (fs.Node, error) { +func (r *Root) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (*fs.Inode, syscall.Errno) { log.Debugf("Root Lookup: '%s'", name) switch name { case "mach_kernel", ".hidden", "._.": - // Just quiet some log noise on OS X. - return nil, syscall.Errno(syscall.ENOENT) + return nil, syscall.ENOENT } p, err := path.NewPath("/ipfs/" + name) if err != nil { log.Debugf("fuse failed to parse path: %q: %s", name, err) - return nil, syscall.Errno(syscall.ENOENT) + return nil, syscall.ENOENT } imPath, err := path.NewImmutablePath(p) if err != nil { log.Debugf("fuse failed to convert path: %q: %s", name, err) - return nil, syscall.Errno(syscall.ENOENT) + return nil, syscall.ENOENT } - nd, ndLnk, err := s.Ipfs.UnixFSPathResolver.ResolvePath(ctx, imPath) + nd, ndLnk, err := r.ipfs.UnixFSPathResolver.ResolvePath(ctx, imPath) if err != nil { - // todo: make this error more versatile. - return nil, syscall.Errno(syscall.ENOENT) + return nil, syscall.ENOENT } cidLnk, ok := ndLnk.(cidlink.Link) if !ok { log.Debugf("non-cidlink returned from ResolvePath: %v", ndLnk) - return nil, syscall.Errno(syscall.ENOENT) + return nil, syscall.ENOENT } - // convert ipld-prime node to universal node - blk, err := s.Ipfs.Blockstore.Get(ctx, cidLnk.Cid) + blk, err := r.ipfs.Blockstore.Get(ctx, cidLnk.Cid) if err != nil { log.Debugf("fuse failed to retrieve block: %v: %s", cidLnk, err) - return nil, syscall.Errno(syscall.ENOENT) + return nil, syscall.ENOENT } var fnd ipld.Node switch cidLnk.Cid.Prefix().Codec { case cid.DagProtobuf: - adl, ok := nd.(ipldprime.ADL) - if ok { - substrate := adl.Substrate() - fnd, err = mdag.ProtoNodeConverter(blk, substrate) - } else { - fnd, err = mdag.ProtoNodeConverter(blk, nd) - } + fnd, err = mdag.DecodeProtobuf(blk.RawData()) case cid.Raw: fnd, err = mdag.RawNodeConverter(blk, nd) default: log.Error("fuse node was not a supported type") - return nil, 
syscall.Errno(syscall.ENOTSUP) + return nil, syscall.ENOTSUP } if err != nil { - log.Errorf("could not convert protobuf or raw node: %s", err) - return nil, syscall.Errno(syscall.ENOENT) + log.Errorf("could not decode block as protobuf or raw node: %s", err) + return nil, syscall.ENOENT } - return &Node{Ipfs: s.Ipfs, Nd: fnd}, nil + child := &Node{ipfs: r.ipfs, nd: fnd} + stable := stableAttrFor(child) + + // Fill attrs in the lookup response so the kernel doesn't cache zeros. + child.fillAttr(&out.Attr) + out.SetEntryTimeout(immutableAttrCacheTime) + out.SetAttrTimeout(immutableAttrCacheTime) + return r.NewInode(ctx, child, stable), 0 } -// ReadDirAll reads a particular directory. Disallowed for root. -func (*Root) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { - log.Debug("read Root") - return nil, syscall.Errno(syscall.EPERM) +// Readdir on the namespace root is not allowed (execute-only). +func (*Root) Readdir(_ context.Context) (fs.DirStream, syscall.Errno) { + return nil, syscall.EPERM } // Node is the core object representing a filesystem tree node. type Node struct { - Ipfs *core.IpfsNode - Nd ipld.Node + fs.Inode + ipfs *core.IpfsNode + nd ipld.Node cached *ft.FSNode } -func (s *Node) loadData() error { - if pbnd, ok := s.Nd.(*mdag.ProtoNode); ok { +func (n *Node) loadData() error { + if pbnd, ok := n.nd.(*mdag.ProtoNode); ok { fsn, err := ft.FSNodeFromBytes(pbnd.Data()) if err != nil { return err } - s.cached = fsn + n.cached = fsn } return nil } -// Attr returns the attributes of a given node. 
-func (s *Node) Attr(ctx context.Context, a *fuse.Attr) error { +func (n *Node) Getattr(_ context.Context, _ fs.FileHandle, out *fuse.AttrOut) syscall.Errno { log.Debug("Node attr") - if rawnd, ok := s.Nd.(*mdag.RawNode); ok { - a.Mode = 0o444 + out.SetTimeout(immutableAttrCacheTime) + n.fillAttr(&out.Attr) + return 0 +} + +// Open creates a DagReader that is reused across sequential Read +// calls, avoiding re-traversal of the DAG from the root on each read. +func (n *Node) Open(ctx context.Context, _ uint32) (fs.FileHandle, uint32, syscall.Errno) { + r, err := uio.NewDagReader(ctx, n.nd, n.ipfs.DAG) + if err != nil { + return nil, 0, fusemnt.ReadErrno(err) + } + return &roFileHandle{r: r}, fuse.FOPEN_KEEP_CACHE, 0 +} + +// roFileHandle holds a DagReader for the lifetime of an open file. +// All methods are serialized by mu because the FUSE server dispatches +// each request in its own goroutine and the underlying DagReader is +// not safe for concurrent use. +type roFileHandle struct { + r uio.DagReader + mu sync.Mutex +} + +// fillAttr populates a fuse.Attr from this node's UnixFS metadata. +// Used by both Getattr and Lookup (to fill EntryOut.Attr so the kernel +// doesn't cache zero values for the entry timeout duration). +// +// Blocks and Blksize are set on every entry because go-fuse's setBlocks +// otherwise auto-fills them from Size with a 4 KiB page-based fallback, +// which clobbers the UnixFS-derived values set below. 
+func (n *Node) fillAttr(a *fuse.Attr) { + a.Blksize = fusemnt.DefaultBlksize + + if rawnd, ok := n.nd.(*mdag.RawNode); ok { + a.Mode = uint32(fusemnt.DefaultFileModeRO.Perm()) a.Size = uint64(len(rawnd.RawData())) - a.Blocks = 1 - return nil + a.Blocks = fusemnt.SizeToStatBlocks(a.Size) + return } - if s.cached == nil { - if err := s.loadData(); err != nil { - return fmt.Errorf("readonly: loadData() failed: %s", err) + if n.cached == nil { + if err := n.loadData(); err != nil { + log.Errorf("readonly: loadData() failed: %s", err) + return } } - switch s.cached.Type() { + + switch n.cached.Type() { case ft.TDirectory, ft.THAMTShard: - a.Mode = os.ModeDir | 0o555 + a.Mode = uint32(fusemnt.DefaultDirModeRO.Perm()) + // Nominal 1 block: du sums child leaves, so the directory's + // own st_blocks is not arithmetically meaningful, but some + // tools treat 0 as "unsupported" and skip the entry. + a.Blocks = 1 case ft.TFile: - size := s.cached.FileSize() - a.Mode = 0o444 - a.Size = uint64(size) - a.Blocks = uint64(len(s.Nd.Links())) + a.Mode = uint32(fusemnt.DefaultFileModeRO.Perm()) + a.Size = n.cached.FileSize() + a.Blocks = fusemnt.SizeToStatBlocks(a.Size) case ft.TRaw: - a.Mode = 0o444 - a.Size = uint64(len(s.cached.Data())) - a.Blocks = uint64(len(s.Nd.Links())) + a.Mode = uint32(fusemnt.DefaultFileModeRO.Perm()) + a.Size = uint64(len(n.cached.Data())) + a.Blocks = fusemnt.SizeToStatBlocks(a.Size) case ft.TSymlink: - a.Mode = 0o777 | os.ModeSymlink - a.Size = uint64(len(s.cached.Data())) + a.Mode = uint32(fusemnt.SymlinkMode.Perm()) + a.Size = uint64(len(n.cached.Data())) + a.Blocks = fusemnt.SizeToStatBlocks(a.Size) default: - return fmt.Errorf("invalid data type - %s", s.cached.Type()) + log.Errorf("invalid data type: %s", n.cached.Type()) + return + } + + // Use mode and mtime from UnixFS metadata when present. 
+ if m := n.cached.Mode(); m != 0 { + a.Mode = files.ModePermsToUnixPerms(m) + } + if t := n.cached.ModTime(); !t.IsZero() { + a.SetTimes(nil, &t, nil) } - return nil } -// Lookup performs a lookup under this node. -func (s *Node) Lookup(ctx context.Context, name string) (fs.Node, error) { +func (n *Node) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (*fs.Inode, syscall.Errno) { log.Debugf("Lookup '%s'", name) - link, _, err := uio.ResolveUnixfsOnce(ctx, s.Ipfs.DAG, s.Nd, []string{name}) + link, _, err := uio.ResolveUnixfsOnce(ctx, n.ipfs.DAG, n.nd, []string{name}) switch err { case os.ErrNotExist, mdag.ErrLinkNotFound: - // todo: make this error more versatile. - return nil, syscall.Errno(syscall.ENOENT) + return nil, syscall.ENOENT case nil: - // noop default: log.Errorf("fuse lookup %q: %s", name, err) - return nil, syscall.Errno(syscall.EIO) + return nil, syscall.EIO } - nd, err := s.Ipfs.DAG.Get(ctx, link.Cid) + nd, err := n.ipfs.DAG.Get(ctx, link.Cid) if err != nil && !ipld.IsNotFound(err) { log.Errorf("fuse lookup %q: %s", name, err) - return nil, err + return nil, syscall.EIO } - return &Node{Ipfs: s.Ipfs, Nd: nd}, nil + child := &Node{ipfs: n.ipfs, nd: nd} + stable := stableAttrFor(child) + + child.fillAttr(&out.Attr) + out.SetEntryTimeout(immutableAttrCacheTime) + out.SetAttrTimeout(immutableAttrCacheTime) + return n.NewInode(ctx, child, stable), 0 } -// ReadDirAll reads the link structure as directory entries. 
-func (s *Node) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { +func (n *Node) Readdir(ctx context.Context) (fs.DirStream, syscall.Errno) { log.Debug("Node ReadDir") - dir, err := uio.NewDirectoryFromNode(s.Ipfs.DAG, s.Nd) + dir, err := uio.NewDirectoryFromNode(n.ipfs.DAG, n.nd) if err != nil { - return nil, err + return nil, fusemnt.ReadErrno(err) } - var entries []fuse.Dirent + var entries []fuse.DirEntry err = dir.ForEachLink(ctx, func(lnk *ipld.Link) error { - n := lnk.Name - if len(n) == 0 { - n = lnk.Cid.String() + name := lnk.Name + if len(name) == 0 { + name = lnk.Cid.String() } - nd, err := s.Ipfs.DAG.Get(ctx, lnk.Cid) + nd, err := n.ipfs.DAG.Get(ctx, lnk.Cid) if err != nil { log.Warn("error fetching directory child node: ", err) + return err } - t := fuse.DT_Unknown + var mode uint32 switch nd := nd.(type) { case *mdag.RawNode: - t = fuse.DT_File + // regular file (mode 0 = S_IFREG) case *mdag.ProtoNode: if fsn, err := ft.FSNodeFromBytes(nd.Data()); err != nil { log.Warn("failed to unmarshal protonode data field:", err) } else { switch fsn.Type() { case ft.TDirectory, ft.THAMTShard: - t = fuse.DT_Dir + mode = syscall.S_IFDIR case ft.TFile, ft.TRaw: - t = fuse.DT_File + // regular file case ft.TSymlink: - t = fuse.DT_Link + mode = syscall.S_IFLNK case ft.TMetadata: log.Error("metadata object in fuse should contain its wrapped type") default: @@ -240,70 +302,109 @@ func (s *Node) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { } } } - entries = append(entries, fuse.Dirent{Name: n, Type: t}) + entries = append(entries, fuse.DirEntry{Name: name, Mode: mode}) return nil }) if err != nil { - return nil, err + return nil, fusemnt.ReadErrno(err) } - if len(entries) > 0 { - return entries, nil - } - return nil, syscall.Errno(syscall.ENOENT) + return fs.NewListDirStream(entries), 0 } -func (s *Node) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { - // TODO: is nil the right response for 'bug off, we 
ain't got none' ? - resp.Xattr = nil - return nil +func (n *Node) Listxattr(_ context.Context, dest []byte) (uint32, syscall.Errno) { + // Null-terminated list of attribute names. + data := []byte(fusemnt.XattrCID + "\x00") + if len(dest) == 0 { + return uint32(len(data)), 0 + } + if len(dest) < len(data) { + return 0, syscall.ERANGE + } + return uint32(copy(dest, data)), 0 } -func (s *Node) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) { - if s.cached == nil || s.cached.Type() != ft.TSymlink { - return "", fuse.Errno(syscall.EINVAL) +func (n *Node) Getxattr(_ context.Context, attr string, dest []byte) (uint32, syscall.Errno) { + if attr == fusemnt.XattrCIDDeprecated { + log.Errorf("xattr %q is deprecated, use %q instead", fusemnt.XattrCIDDeprecated, fusemnt.XattrCID) + attr = fusemnt.XattrCID + } + if attr != fusemnt.XattrCID { + return 0, fs.ENOATTR + } + data := []byte(n.nd.Cid().String()) + if len(dest) == 0 { + return uint32(len(data)), 0 } - return string(s.cached.Data()), nil + if len(dest) < len(data) { + return 0, syscall.ERANGE + } + return uint32(copy(dest, data)), 0 } -func (s *Node) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { - r, err := uio.NewDagReader(ctx, s.Nd, s.Ipfs.DAG) - if err != nil { - return err +func (n *Node) Readlink(_ context.Context) ([]byte, syscall.Errno) { + if n.cached == nil || n.cached.Type() != ft.TSymlink { + return nil, syscall.EINVAL } - _, err = r.Seek(req.Offset, io.SeekStart) - if err != nil { - return err + return n.cached.Data(), 0 +} + +func (fh *roFileHandle) Read(ctx context.Context, dest []byte, off int64) (fuse.ReadResult, syscall.Errno) { + fh.mu.Lock() + defer fh.mu.Unlock() + + if _, err := fh.r.Seek(off, io.SeekStart); err != nil { + return nil, fusemnt.ReadErrno(err) } - // Data has a capacity of Size - buf := resp.Data[:int(req.Size)] - n, err := io.ReadFull(r, buf) - resp.Data = buf[:n] + n, err := fh.r.CtxReadFull(ctx, dest) switch err { case 
nil, io.EOF, io.ErrUnexpectedEOF: default: - return err + return nil, fusemnt.ReadErrno(err) } - resp.Data = resp.Data[:n] - return nil // may be non-nil / not succeeded + return fuse.ReadResultData(dest[:n]), 0 } -// to check that our Node implements all the interfaces we want. -type roRoot interface { - fs.Node - fs.HandleReadDirAller - fs.NodeStringLookuper -} +func (fh *roFileHandle) Release(_ context.Context) syscall.Errno { + fh.mu.Lock() + defer fh.mu.Unlock() -var _ roRoot = (*Root)(nil) + return fs.ToErrno(fh.r.Close()) +} -type roNode interface { - fs.HandleReadDirAller - fs.HandleReader - fs.Node - fs.NodeStringLookuper - fs.NodeReadlinker - fs.NodeGetxattrer +// stableAttrFor returns the StableAttr (file type bits) for a Node. +func stableAttrFor(n *Node) fs.StableAttr { + if _, ok := n.nd.(*mdag.RawNode); ok { + return fs.StableAttr{} // S_IFREG + } + if n.cached == nil { + _ = n.loadData() + } + if n.cached != nil { + switch n.cached.Type() { + case ft.TDirectory, ft.THAMTShard: + return fs.StableAttr{Mode: syscall.S_IFDIR} + case ft.TSymlink: + return fs.StableAttr{Mode: syscall.S_IFLNK} + } + } + return fs.StableAttr{} // S_IFREG } -var _ roNode = (*Node)(nil) +// Interface checks. +var ( + _ fs.NodeGetattrer = (*Root)(nil) + _ fs.NodeLookuper = (*Root)(nil) + _ fs.NodeReaddirer = (*Root)(nil) + _ fs.NodeStatfser = (*Root)(nil) + _ fs.NodeGetattrer = (*Node)(nil) + _ fs.NodeLookuper = (*Node)(nil) + _ fs.NodeOpener = (*Node)(nil) + _ fs.NodeReaddirer = (*Node)(nil) + _ fs.NodeReadlinker = (*Node)(nil) + _ fs.NodeGetxattrer = (*Node)(nil) + _ fs.NodeListxattrer = (*Node)(nil) + + _ fs.FileReader = (*roFileHandle)(nil) + _ fs.FileReleaser = (*roFileHandle)(nil) +) diff --git a/fuse/writable/writable.go b/fuse/writable/writable.go new file mode 100644 index 00000000000..8a5c7e89cb9 --- /dev/null +++ b/fuse/writable/writable.go @@ -0,0 +1,816 @@ +// Package writable implements FUSE filesystem types shared by the +// mutable /mfs and /ipns mounts. 
Both mounts expose MFS directories +// as writable POSIX filesystems; the only differences are how the +// root is created and how xattr names are published. +// +//go:build (linux || darwin || freebsd) && !nofuse + +package writable + +import ( + "context" + "io" + "os" + "sync" + "syscall" + "time" + + "github.com/hanwen/go-fuse/v2/fs" + "github.com/hanwen/go-fuse/v2/fuse" + + "github.com/ipfs/boxo/files" + dag "github.com/ipfs/boxo/ipld/merkledag" + ft "github.com/ipfs/boxo/ipld/unixfs" + uio "github.com/ipfs/boxo/ipld/unixfs/io" + "github.com/ipfs/boxo/mfs" + ipld "github.com/ipfs/go-ipld-format" + logging "github.com/ipfs/go-log/v2" + fusemnt "github.com/ipfs/kubo/fuse/mount" +) + +var log = logging.Logger("fuse/writable") + +// Config controls write-side behavior for writable mounts. +type Config struct { + StoreMtime bool // persist mtime on create and open-for-write + StoreMode bool // persist mode on chmod + DAG ipld.DAGService // required: read-only opens use it to bypass MFS desclock + // RepoPath is the on-disk path of the IPFS repo (e.g. ~/.ipfs). + // Statfs calls syscall.Statfs on this path so that the FUSE mount + // reports how much free space is left on the volume that stores + // MFS data. Without it tools like macOS Finder see zero free space + // and refuse to copy files. + RepoPath string + // Blksize is the preferred I/O size advertised via st_blksize on + // every stat. Callers should derive it from Import.UnixFSChunker via + // fusemnt.BlksizeFromChunker so the hint matches the chunker MFS + // will use for writes. If zero, NewDir writes fusemnt.DefaultBlksize + // into this field in place, so fillAttr on every inode can read + // cfg.Blksize without a nil-check on each stat. + Blksize uint32 +} + +// NewDir creates a Dir node backed by the given MFS directory. +// cfg.DAG is required: read-only file opens build a DagReader directly +// from it to avoid MFS's desclock (see FileInode.Open). 
Passing a nil +// DAG would silently re-introduce the rsync --inplace deadlock, so we +// fail loudly at construction time instead. +func NewDir(d *mfs.Directory, cfg *Config) *Dir { + if cfg == nil || cfg.DAG == nil { + panic("fuse/writable: Config.DAG is required") + } + // Tests and callers that don't plumb Import.UnixFSChunker leave + // Blksize zero; fall back to the FUSE default so stat advertises a + // usable st_blksize. See Config.Blksize for why we mutate in place. + if cfg.Blksize == 0 { + cfg.Blksize = fusemnt.DefaultBlksize + } + return &Dir{MFSDir: d, Cfg: cfg} +} + +// Dir is the FUSE adapter for MFS directories. +type Dir struct { + fs.Inode + MFSDir *mfs.Directory + Cfg *Config +} + +// fillAttr fills stat attributes for a directory. Blocks and Blksize +// are set explicitly because go-fuse's setBlocks otherwise auto-fills +// them from Size with a 4 KiB page-based fallback. For directories +// Size is 0, so the fallback yields st_blocks=0, which some tools +// (dedup scanners, file managers) treat as "unsupported". +func (d *Dir) fillAttr(a *fuse.Attr) { + a.Mode = uint32(fusemnt.DefaultDirModeRW.Perm()) + a.Blocks = 1 + a.Blksize = d.Cfg.Blksize + if m, err := d.MFSDir.Mode(); err == nil && m != 0 { + a.Mode = files.ModePermsToUnixPerms(m) + } + if t, err := d.MFSDir.ModTime(); err == nil && !t.IsZero() { + a.SetTimes(nil, &t, nil) + } +} + +func (d *Dir) Getattr(_ context.Context, _ fs.FileHandle, out *fuse.AttrOut) syscall.Errno { + d.fillAttr(&out.Attr) + return 0 +} + +// Statfs reports disk-space statistics for the underlying filesystem. +// macOS Finder checks free space before copying; without this it +// reports "not enough free space" because go-fuse returns zeroed stats. 
+func (d *Dir) Statfs(_ context.Context, out *fuse.StatfsOut) syscall.Errno { + if d.Cfg.RepoPath == "" { + return 0 + } + var s syscall.Statfs_t + if err := syscall.Statfs(d.Cfg.RepoPath, &s); err != nil { + return fs.ToErrno(err) + } + out.FromStatfsT(&s) + return 0 +} + +// Setattr handles chmod and mtime changes on directories. +// Tools like tar and rsync set directory timestamps after extraction. +// +// Mode and mtime are stored as UnixFS optional metadata. +// The UnixFS spec supports all 12 permission bits, but boxo's MFS +// layer exposes only the lower 9 (ugo-rwx); setuid/setgid/sticky +// are silently dropped. FUSE mounts are always nosuid so these +// bits would have no execution effect anyway. +// See https://specs.ipfs.tech/unixfs/#dag-pb-optional-metadata +func (d *Dir) Setattr(_ context.Context, _ fs.FileHandle, in *fuse.SetAttrIn, out *fuse.AttrOut) syscall.Errno { + if mode, ok := in.GetMode(); ok && d.Cfg.StoreMode { + if err := d.MFSDir.SetMode(files.UnixPermsToModePerms(mode)); err != nil { + return fs.ToErrno(err) + } + } + if mtime, ok := in.GetMTime(); ok && d.Cfg.StoreMtime { + if err := d.MFSDir.SetModTime(mtime); err != nil { + return fs.ToErrno(err) + } + } + d.fillAttr(&out.Attr) + return 0 +} + +func (d *Dir) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (*fs.Inode, syscall.Errno) { + mfsNode, err := d.MFSDir.Child(name) + if err != nil { + return nil, syscall.ENOENT + } + + switch mfsNode.Type() { + case mfs.TDir: + child := &Dir{MFSDir: mfsNode.(*mfs.Directory), Cfg: d.Cfg} + child.fillAttr(&out.Attr) + return d.NewInode(ctx, child, fs.StableAttr{Mode: syscall.S_IFDIR}), 0 + case mfs.TFile: + mfsFile := mfsNode.(*mfs.File) + if target := SymlinkTarget(mfsFile); target != "" { + child := &Symlink{Target: target, MFSFile: mfsFile, Cfg: d.Cfg} + child.fillAttr(&out.Attr) + return d.NewInode(ctx, child, fs.StableAttr{Mode: syscall.S_IFLNK}), 0 + } + child := &FileInode{MFSFile: mfsFile, Cfg: d.Cfg} + 
child.fillAttr(&out.Attr) + return d.NewInode(ctx, child, fs.StableAttr{}), 0 + default: + log.Errorf("unexpected MFS node type %d under directory", mfsNode.Type()) + return nil, syscall.EIO + } +} + +func (d *Dir) Readdir(ctx context.Context) (fs.DirStream, syscall.Errno) { + nodes, err := d.MFSDir.List(ctx) + if err != nil { + return nil, fs.ToErrno(err) + } + + entries := make([]fuse.DirEntry, len(nodes)) + for i, node := range nodes { + var mode uint32 + switch { + case node.Type == int(mfs.TDir): + mode = syscall.S_IFDIR + case node.Type == int(mfs.TFile): + // MFS represents symlinks as TFile; check the DAG node. + if child, err := d.MFSDir.Child(node.Name); err == nil { + if f, ok := child.(*mfs.File); ok && SymlinkTarget(f) != "" { + mode = syscall.S_IFLNK + } + } + } + entries[i] = fuse.DirEntry{Name: node.Name, Mode: mode} + } + return fs.NewListDirStream(entries), 0 +} + +// Mkdir creates a new directory under d. +// +// TODO: boxo's mfs.Directory.Mkdir(name string) accepts no mode +// argument, so the caller's mode is silently dropped here. Tools +// that mkdir then chown without a follow-up chmod (some tar/rsync +// flows) see the default 0755 instead of the requested mode. +// Fixing this requires a boxo MFS API change. +func (d *Dir) Mkdir(ctx context.Context, name string, _ uint32, out *fuse.EntryOut) (*fs.Inode, syscall.Errno) { + mfsDir, err := d.MFSDir.Mkdir(name) + if err != nil { + return nil, fs.ToErrno(err) + } + child := &Dir{MFSDir: mfsDir, Cfg: d.Cfg} + // Fill the response attrs so the kernel doesn't cache zero values + // until AttrTimeout expires. Matches Dir.Create and FileInode.Setattr. 
+ child.fillAttr(&out.Attr) + return d.NewInode(ctx, child, fs.StableAttr{Mode: syscall.S_IFDIR}), 0 +} + +func (d *Dir) Unlink(_ context.Context, name string) syscall.Errno { + if err := d.MFSDir.Unlink(name); err != nil { + return fs.ToErrno(err) + } + return fs.ToErrno(d.MFSDir.Flush()) +} + +func (d *Dir) Rmdir(ctx context.Context, name string) syscall.Errno { + child, err := d.MFSDir.Child(name) + if err != nil { + return fs.ToErrno(err) + } + target, ok := child.(*mfs.Directory) + if !ok { + return syscall.ENOTDIR + } + + children, err := target.ListNames(ctx) + if err != nil { + return fs.ToErrno(err) + } + if len(children) > 0 { + return syscall.ENOTEMPTY + } + + if err := d.MFSDir.Unlink(name); err != nil { + return fs.ToErrno(err) + } + return fs.ToErrno(d.MFSDir.Flush()) +} + +// Rename moves an entry across MFS directories. +// +// TODO: this is not atomic. The source is unlinked before the +// destination is added, so any failure between the two steps loses +// the source entry. Making it atomic requires changes to MFS rename +// semantics (boxo/mfs does not currently expose an atomic rename). +func (d *Dir) Rename(_ context.Context, oldName string, newParent fs.InodeEmbedder, newName string, _ uint32) syscall.Errno { + child, err := d.MFSDir.Child(oldName) + if err != nil { + return fs.ToErrno(err) + } + + nd, err := child.GetNode() + if err != nil { + return fs.ToErrno(err) + } + + // Unlink the source first. For same-directory renames, this clears + // the old name from the directory's entry cache before AddChild + // repopulates it with the new name. Without this ordering, Flush + // would sync the stale cache entry back into the DAG. 
+ if err := d.MFSDir.Unlink(oldName); err != nil { + return fs.ToErrno(err) + } + + targetDir, ok := newParent.EmbeddedInode().Operations().(*Dir) + if !ok { + return syscall.EINVAL + } + if err := targetDir.MFSDir.Unlink(newName); err != nil && err != os.ErrNotExist { + return fs.ToErrno(err) + } + if err := targetDir.MFSDir.AddChild(newName, nd); err != nil { + return fs.ToErrno(err) + } + + return fs.ToErrno(d.MFSDir.Flush()) +} + +func (d *Dir) Create(ctx context.Context, name string, flags uint32, _ uint32, out *fuse.EntryOut) (*fs.Inode, fs.FileHandle, uint32, syscall.Errno) { + node := dag.NodeWithData(ft.FilePBData(nil, 0)) + if err := node.SetCidBuilder(d.MFSDir.GetCidBuilder()); err != nil { + return nil, nil, 0, fs.ToErrno(err) + } + + if err := d.MFSDir.AddChild(name, node); err != nil { + return nil, nil, 0, fs.ToErrno(err) + } + + if err := d.MFSDir.Flush(); err != nil { + return nil, nil, 0, fs.ToErrno(err) + } + + mfsNode, err := d.MFSDir.Child(name) + if err != nil { + return nil, nil, 0, fs.ToErrno(err) + } + if d.Cfg.StoreMtime { + if err := mfsNode.SetModTime(time.Now()); err != nil { + return nil, nil, 0, fs.ToErrno(err) + } + } + + mfsFile, ok := mfsNode.(*mfs.File) + if !ok { + return nil, nil, 0, syscall.EIO + } + fileInode := &FileInode{MFSFile: mfsFile, Cfg: d.Cfg} + + accessMode := flags & syscall.O_ACCMODE + fd, err := mfsFile.Open(mfs.Flags{ + Read: accessMode == syscall.O_RDONLY || accessMode == syscall.O_RDWR, + Write: accessMode == syscall.O_WRONLY || accessMode == syscall.O_RDWR, + Sync: true, + }) + if err != nil { + return nil, nil, 0, fs.ToErrno(err) + } + + // Fill the response attrs so the kernel doesn't cache zero values + // (mode 0, size 0) for the new inode until AttrTimeout expires. + // fstat on the open file handle returned to the caller hits this + // cache, so leaving it empty makes f.Stat() report mode 0 right + // after open. Matches FileInode.Setattr and Dir.Mkdir. 
+ fileInode.fillAttr(&out.Attr) + + inode := d.NewInode(ctx, fileInode, fs.StableAttr{}) + return inode, &FileHandle{inode: inode, fd: fd}, 0, 0 +} + +func (d *Dir) Listxattr(_ context.Context, dest []byte) (uint32, syscall.Errno) { + data := []byte(fusemnt.XattrCID + "\x00") + if len(dest) == 0 { + return uint32(len(data)), 0 + } + if len(dest) < len(data) { + return 0, syscall.ERANGE + } + return uint32(copy(dest, data)), 0 +} + +func (d *Dir) Getxattr(_ context.Context, attr string, dest []byte) (uint32, syscall.Errno) { + if attr == fusemnt.XattrCIDDeprecated { + log.Errorf("xattr %q is deprecated, use %q instead", fusemnt.XattrCIDDeprecated, fusemnt.XattrCID) + attr = fusemnt.XattrCID + } + if attr != fusemnt.XattrCID { + return 0, fs.ENOATTR + } + nd, err := d.MFSDir.GetNode() + if err != nil { + return 0, fs.ToErrno(err) + } + data := []byte(nd.Cid().String()) + if len(dest) == 0 { + return uint32(len(data)), 0 + } + if len(dest) < len(data) { + return 0, syscall.ERANGE + } + return uint32(copy(dest, data)), 0 +} + +// Symlink creates a new symlink in this directory. +func (d *Dir) Symlink(ctx context.Context, target, name string, out *fuse.EntryOut) (*fs.Inode, syscall.Errno) { + data, err := ft.SymlinkData(target) + if err != nil { + return nil, fs.ToErrno(err) + } + nd := dag.NodeWithData(data) + if err := nd.SetCidBuilder(d.MFSDir.GetCidBuilder()); err != nil { + return nil, fs.ToErrno(err) + } + if err := d.MFSDir.AddChild(name, nd); err != nil { + return nil, fs.ToErrno(err) + } + if err := d.MFSDir.Flush(); err != nil { + return nil, fs.ToErrno(err) + } + + // Retrieve the mfs.File so Setattr can persist mtime. 
+ mfsNode, err := d.MFSDir.Child(name) + if err != nil { + return nil, fs.ToErrno(err) + } + mfsFile, _ := mfsNode.(*mfs.File) + + sym := &Symlink{Target: target, MFSFile: mfsFile, Cfg: d.Cfg} + sym.fillAttr(&out.Attr) + return d.NewInode(ctx, sym, fs.StableAttr{Mode: syscall.S_IFLNK}), 0 +} + +// FileInode is the FUSE adapter for MFS file inodes. +type FileInode struct { + fs.Inode + MFSFile *mfs.File + Cfg *Config +} + +func (fi *FileInode) fillAttr(a *fuse.Attr) { + size, _ := fi.MFSFile.Size() + a.Size = uint64(size) + a.Blocks = fusemnt.SizeToStatBlocks(a.Size) + a.Blksize = fi.Cfg.Blksize + a.Mode = uint32(fusemnt.DefaultFileModeRW.Perm()) + if m, err := fi.MFSFile.Mode(); err == nil && m != 0 { + a.Mode = files.ModePermsToUnixPerms(m) + } + if t, _ := fi.MFSFile.ModTime(); !t.IsZero() { + a.SetTimes(nil, &t, nil) + } +} + +func (fi *FileInode) Getattr(_ context.Context, _ fs.FileHandle, out *fuse.AttrOut) syscall.Errno { + fi.fillAttr(&out.Attr) + return 0 +} + +func (fi *FileInode) Open(ctx context.Context, flags uint32) (fs.FileHandle, uint32, syscall.Errno) { + accessMode := flags & syscall.O_ACCMODE + + // Read-only opens bypass MFS's desclock by creating a DagReader + // directly from the current DAG node. MFS holds desclock.RLock + // for the lifetime of a read descriptor, which blocks any + // concurrent write open on the same file (desclock.Lock). Tools + // like rsync --inplace open the destination for reading and + // writing simultaneously, deadlocking on MFS's lock. Creating + // a DagReader here avoids the lock entirely: the reader gets a + // snapshot of the file at open time, and writers proceed through + // MFS independently. Cfg.DAG is required by NewDir. 
+ if accessMode == syscall.O_RDONLY { + nd, err := fi.MFSFile.GetNode() + if err != nil { + return nil, 0, fs.ToErrno(err) + } + r, err := uio.NewDagReader(ctx, nd, fi.Cfg.DAG) + if err != nil { + return nil, 0, fusemnt.ReadErrno(err) + } + return &roFileHandle{r: r}, fuse.FOPEN_KEEP_CACHE, 0 + } + + mfsFlags := mfs.Flags{ + Read: accessMode == syscall.O_RDONLY || accessMode == syscall.O_RDWR, + Write: accessMode == syscall.O_WRONLY || accessMode == syscall.O_RDWR, + Sync: true, + } + fd, err := fi.MFSFile.Open(mfsFlags) + if err != nil { + return nil, 0, fs.ToErrno(err) + } + + if flags&syscall.O_TRUNC != 0 { + if !mfsFlags.Write { + fd.Close() + log.Error("tried to open a readonly file with truncate") + return nil, 0, syscall.ENOTSUP + } + if err := fd.Truncate(0); err != nil { + fd.Close() + return nil, 0, fs.ToErrno(err) + } + } + // O_APPEND is handled in FileHandle.Write by seeking to end. + + if mfsFlags.Write && fi.Cfg.StoreMtime { + if err := fi.MFSFile.SetModTime(time.Now()); err != nil { + fd.Close() + return nil, 0, fs.ToErrno(err) + } + } + + return &FileHandle{inode: fi.EmbeddedInode(), fd: fd, appendMode: flags&syscall.O_APPEND != 0}, 0, 0 +} + +// Setattr handles chmod, mtime changes (touch), and ftruncate. +// +// Mode and mtime are stored as UnixFS optional metadata. +// The UnixFS spec supports all 12 permission bits, but boxo's MFS +// layer exposes only the lower 9 (ugo-rwx); setuid/setgid/sticky +// are silently dropped. FUSE mounts are always nosuid so these +// bits would have no execution effect anyway. +// See https://specs.ipfs.tech/unixfs/#dag-pb-optional-metadata +// +// With hanwen/go-fuse, the kernel passes the open file handle (fh) when +// the caller uses ftruncate(fd, size). This lets us truncate through +// the existing write descriptor without opening a second one. For +// truncate(path, size) without a handle, a temporary descriptor is +// opened; this may block if another writer holds MFS's desclock. 
+func (fi *FileInode) Setattr(_ context.Context, fh fs.FileHandle, in *fuse.SetAttrIn, out *fuse.AttrOut) syscall.Errno { + if sz, ok := in.GetSize(); ok { + if f, ok := fh.(*FileHandle); ok { + // ftruncate(fd, size): use the existing write descriptor. + f.mu.Lock() + err := f.fd.Truncate(int64(sz)) + f.mu.Unlock() + if err != nil { + return fs.ToErrno(err) + } + } else { + // truncate(path, size) without an open file descriptor. + // Open a temporary write descriptor, truncate, flush, and + // close. This may block if another writer holds MFS's + // desclock; the FUSE kernel timeout (30s) bounds the wait. + fd, err := fi.MFSFile.Open(mfs.Flags{Write: true, Sync: true}) + if err != nil { + return fs.ToErrno(err) + } + if err := fd.Truncate(int64(sz)); err != nil { + fd.Close() + return fs.ToErrno(err) + } + if err := fd.Flush(); err != nil { + fd.Close() + return fs.ToErrno(err) + } + if err := fd.Close(); err != nil { + return fs.ToErrno(err) + } + } + } + if mode, ok := in.GetMode(); ok && fi.Cfg.StoreMode { + if err := fi.MFSFile.SetMode(files.UnixPermsToModePerms(mode)); err != nil { + return fs.ToErrno(err) + } + } + if mtime, ok := in.GetMTime(); ok && fi.Cfg.StoreMtime { + if err := fi.MFSFile.SetModTime(mtime); err != nil { + return fs.ToErrno(err) + } + } + // Fill the response attrs so the kernel doesn't cache stale zero + // values until AttrTimeout expires. Matches Dir.Setattr behavior. 
+ fi.fillAttr(&out.Attr) + return 0 +} + +func (fi *FileInode) Listxattr(_ context.Context, dest []byte) (uint32, syscall.Errno) { + data := []byte(fusemnt.XattrCID + "\x00") + if len(dest) == 0 { + return uint32(len(data)), 0 + } + if len(dest) < len(data) { + return 0, syscall.ERANGE + } + return uint32(copy(dest, data)), 0 +} + +func (fi *FileInode) Getxattr(_ context.Context, attr string, dest []byte) (uint32, syscall.Errno) { + if attr == fusemnt.XattrCIDDeprecated { + log.Errorf("xattr %q is deprecated, use %q instead", fusemnt.XattrCIDDeprecated, fusemnt.XattrCID) + attr = fusemnt.XattrCID + } + if attr != fusemnt.XattrCID { + return 0, fs.ENOATTR + } + nd, err := fi.MFSFile.GetNode() + if err != nil { + return 0, fs.ToErrno(err) + } + data := []byte(nd.Cid().String()) + if len(dest) == 0 { + return uint32(len(data)), 0 + } + if len(dest) < len(data) { + return 0, syscall.ERANGE + } + return uint32(copy(dest, data)), 0 +} + +// FileHandle wraps an MFS file descriptor for FUSE operations. +// All methods are serialized by mu because the FUSE server dispatches +// each request in its own goroutine and the underlying DagModifier +// is not safe for concurrent use. 
+type FileHandle struct { + inode *fs.Inode // back-pointer for kernel cache invalidation + fd mfs.FileDescriptor + mu sync.Mutex + appendMode bool // O_APPEND: writes always go to end of file +} + +func (fh *FileHandle) Read(ctx context.Context, dest []byte, off int64) (fuse.ReadResult, syscall.Errno) { + fh.mu.Lock() + defer fh.mu.Unlock() + + if _, err := fh.fd.Seek(off, io.SeekStart); err != nil { + return nil, fs.ToErrno(err) + } + + size, err := fh.fd.Size() + if err != nil { + return nil, fs.ToErrno(err) + } + + n := min(len(dest), int(size-off)) + if n <= 0 { + return fuse.ReadResultData(nil), 0 + } + got, err := fh.fd.CtxReadFull(ctx, dest[:n]) + if err != nil { + return nil, fusemnt.ReadErrno(err) + } + return fuse.ReadResultData(dest[:got]), 0 +} + +func (fh *FileHandle) Write(_ context.Context, data []byte, off int64) (uint32, syscall.Errno) { + fh.mu.Lock() + defer fh.mu.Unlock() + + if fh.appendMode { + // O_APPEND: the kernel may send offset 0, but POSIX says + // writes must go to the end of the file. + if _, err := fh.fd.Seek(0, io.SeekEnd); err != nil { + return 0, fs.ToErrno(err) + } + n, err := fh.fd.Write(data) + if err != nil { + return 0, fs.ToErrno(err) + } + return uint32(n), 0 + } + + n, err := fh.fd.WriteAt(data, off) + if err != nil { + return 0, fs.ToErrno(err) + } + return uint32(n), 0 +} + +// Flush persists buffered writes to the DAG and invalidates the +// kernel's cached attrs so the next stat sees the updated size. +// +// We intentionally ignore ctx: the underlying MFS flush cannot be +// safely canceled mid-operation, and abandoning it would leak a +// background goroutine that races with the subsequent Release. +// +// Cache invalidation happens here (in addition to Release) because +// the kernel calls Flush synchronously inside close() but sends +// Release asynchronously after close() returns. Without this, a +// stat() immediately after close() could see stale cached attrs. 
+func (fh *FileHandle) Flush(_ context.Context) syscall.Errno { + fh.mu.Lock() + defer fh.mu.Unlock() + + err := fh.fd.Flush() + if fh.inode != nil { + _ = fh.inode.NotifyContent(0, 0) + } + return fs.ToErrno(err) +} + +// Release closes the descriptor and invalidates the kernel's cached +// content and attrs so readers opening the same path see the new data. +// Invalidation happens here (in addition to Flush) because fd.Close commits +// the final DAG node; Flush alone may not have the final size yet. +func (fh *FileHandle) Release(_ context.Context) syscall.Errno { + fh.mu.Lock() + defer fh.mu.Unlock() + + err := fh.fd.Close() + if fh.inode != nil { + _ = fh.inode.NotifyContent(0, 0) + } + return fs.ToErrno(err) +} + +// Fsync flushes the write buffer through the open file descriptor and +// invalidates the kernel's cached attrs and content for this inode. +// Editors (vim, emacs) and databases call fsync after writing to +// ensure data reaches persistent storage; a fresh reader on the same +// path must see the synced bytes immediately, not the size the kernel +// cached from the initial Create response. +func (fh *FileHandle) Fsync(_ context.Context, _ uint32) syscall.Errno { + fh.mu.Lock() + defer fh.mu.Unlock() + + err := fh.fd.Flush() + if fh.inode != nil { + _ = fh.inode.NotifyContent(0, 0) + } + return fs.ToErrno(err) +} + +// Symlink is the FUSE adapter for UnixFS TSymlink nodes on writable mounts. +// Target is resolved once at Lookup/Create time and never changes +// (POSIX symlinks are immutable; changing the target requires unlink + symlink).
+type Symlink struct { + fs.Inode + Target string + MFSFile *mfs.File // backing MFS node for mtime persistence + Cfg *Config +} + +func (s *Symlink) Readlink(_ context.Context) ([]byte, syscall.Errno) { + return []byte(s.Target), 0 +} + +func (s *Symlink) fillAttr(a *fuse.Attr) { + a.Mode = uint32(fusemnt.SymlinkMode.Perm()) + a.Size = uint64(len(s.Target)) + a.Blocks = fusemnt.SizeToStatBlocks(a.Size) + a.Blksize = s.Cfg.Blksize + if s.MFSFile != nil { + if t, err := s.MFSFile.ModTime(); err == nil && !t.IsZero() { + a.SetTimes(nil, &t, nil) + } + } +} + +func (s *Symlink) Getattr(_ context.Context, _ fs.FileHandle, out *fuse.AttrOut) syscall.Errno { + s.fillAttr(&out.Attr) + return 0 +} + +// Setattr handles mtime changes on symlinks. +// Tools like rsync call lutimes on symlinks after creating them and +// treat ENOTSUP as an error. Every major FUSE filesystem (gocryptfs, +// rclone, sshfs, s3fs) implements Setattr on symlinks for this reason. +// +// Mode is always 0777 per POSIX convention (access control uses the +// target's mode), so chmod requests are silently accepted but not stored. +func (s *Symlink) Setattr(_ context.Context, _ fs.FileHandle, in *fuse.SetAttrIn, out *fuse.AttrOut) syscall.Errno { + if s.MFSFile != nil { + if mtime, ok := in.GetMTime(); ok && s.Cfg.StoreMtime { + if err := s.MFSFile.SetModTime(mtime); err != nil { + return fs.ToErrno(err) + } + } + } + s.fillAttr(&out.Attr) + return 0 +} + +// roFileHandle is a read-only file handle backed by a DagReader. +// Used for O_RDONLY opens to bypass MFS's desclock (see FileInode.Open). 
+type roFileHandle struct { + r uio.DagReader + mu sync.Mutex +} + +func (fh *roFileHandle) Read(ctx context.Context, dest []byte, off int64) (fuse.ReadResult, syscall.Errno) { + fh.mu.Lock() + defer fh.mu.Unlock() + + if _, err := fh.r.Seek(off, io.SeekStart); err != nil { + return nil, fs.ToErrno(err) + } + n, err := fh.r.CtxReadFull(ctx, dest) + switch err { + case nil, io.EOF, io.ErrUnexpectedEOF: + default: + return nil, fusemnt.ReadErrno(err) + } + return fuse.ReadResultData(dest[:n]), 0 +} + +func (fh *roFileHandle) Release(_ context.Context) syscall.Errno { + fh.mu.Lock() + defer fh.mu.Unlock() + + return fs.ToErrno(fh.r.Close()) +} + +// SymlinkTarget extracts the symlink target from an MFS file, or +// returns "" if the file is not a TSymlink node. MFS represents +// symlinks as *mfs.File, so the DAG node's UnixFS type must be checked. +func SymlinkTarget(f *mfs.File) string { + nd, err := f.GetNode() + if err != nil { + return "" + } + fsn, err := ft.ExtractFSNode(nd) + if err != nil { + return "" + } + if fsn.Type() != ft.TSymlink { + return "" + } + return string(fsn.Data()) +} + +// Interface compliance checks. 
+var ( + _ fs.NodeGetattrer = (*Dir)(nil) + _ fs.NodeStatfser = (*Dir)(nil) + _ fs.NodeSetattrer = (*Dir)(nil) + _ fs.NodeLookuper = (*Dir)(nil) + _ fs.NodeReaddirer = (*Dir)(nil) + _ fs.NodeMkdirer = (*Dir)(nil) + _ fs.NodeUnlinker = (*Dir)(nil) + _ fs.NodeRmdirer = (*Dir)(nil) + _ fs.NodeRenamer = (*Dir)(nil) + _ fs.NodeCreater = (*Dir)(nil) + _ fs.NodeSymlinker = (*Dir)(nil) + _ fs.NodeGetxattrer = (*Dir)(nil) + _ fs.NodeListxattrer = (*Dir)(nil) + + _ fs.NodeGetattrer = (*FileInode)(nil) + _ fs.NodeOpener = (*FileInode)(nil) + _ fs.NodeSetattrer = (*FileInode)(nil) + _ fs.NodeGetxattrer = (*FileInode)(nil) + _ fs.NodeListxattrer = (*FileInode)(nil) + + _ fs.NodeGetattrer = (*Symlink)(nil) + _ fs.NodeSetattrer = (*Symlink)(nil) + _ fs.NodeReadlinker = (*Symlink)(nil) + + _ fs.FileReader = (*FileHandle)(nil) + _ fs.FileWriter = (*FileHandle)(nil) + _ fs.FileFlusher = (*FileHandle)(nil) + _ fs.FileReleaser = (*FileHandle)(nil) + _ fs.FileFsyncer = (*FileHandle)(nil) + + _ fs.FileReader = (*roFileHandle)(nil) + _ fs.FileReleaser = (*roFileHandle)(nil) +) diff --git a/fuse/writable/writable_test.go b/fuse/writable/writable_test.go new file mode 100644 index 00000000000..a634aaaa8f4 --- /dev/null +++ b/fuse/writable/writable_test.go @@ -0,0 +1,105 @@ +//go:build (linux || darwin || freebsd) && !nofuse + +package writable + +import ( + "testing" + + "github.com/hanwen/go-fuse/v2/fuse" + dag "github.com/ipfs/boxo/ipld/merkledag" + fusemnt "github.com/ipfs/kubo/fuse/mount" +) + +// TestNewDirNormalizesBlksize verifies that callers who don't plumb +// Import.UnixFSChunker through (e.g. test-only mounts) get the FUSE +// default so stat still advertises a usable st_blksize. 
+func TestNewDirNormalizesBlksize(t *testing.T) { + t.Run("zero falls back to DefaultBlksize", func(t *testing.T) { + cfg := &Config{DAG: dag.NewDAGService(nil)} + NewDir(nil, cfg) + if cfg.Blksize != fusemnt.DefaultBlksize { + t.Fatalf("Blksize = %d, want DefaultBlksize (%d)", + cfg.Blksize, fusemnt.DefaultBlksize) + } + }) + + t.Run("explicit value passes through", func(t *testing.T) { + cfg := &Config{DAG: dag.NewDAGService(nil), Blksize: 65536} + NewDir(nil, cfg) + if cfg.Blksize != 65536 { + t.Fatalf("Blksize = %d, want 65536", cfg.Blksize) + } + }) +} + +// TestSymlinkSetattrChmodNoError verifies that Setattr on a symlink +// with only a mode change is silently accepted. POSIX symlinks have no +// meaningful permission bits (access control uses the target's mode), +// so handlers must not return an error when the kernel forwards a +// chmod-on-symlink request (e.g. via BSD lchmod or fchmodat with +// AT_SYMLINK_NOFOLLOW). Tools like rsync depend on this contract. +// +// This is a unit test rather than an integration test because Linux +// usually rejects fchmodat(AT_SYMLINK_NOFOLLOW) at the VFS layer with +// EOPNOTSUPP and never forwards it to the FUSE filesystem, so a +// userspace test would not actually exercise this code path. +func TestSymlinkSetattrChmodNoError(t *testing.T) { + // MFSFile is nil: Setattr must still succeed without dereferencing + // it. StoreMode is true to confirm that even when persistence is + // enabled, mode changes on symlinks are silently dropped. + s := &Symlink{ + Target: "/some/target", + Cfg: &Config{StoreMode: true}, + } + + in := &fuse.SetAttrIn{} + in.Valid = fuse.FATTR_MODE + in.Mode = 0o600 + + out := &fuse.AttrOut{} + if errno := s.Setattr(t.Context(), nil, in, out); errno != 0 { + t.Fatalf("Symlink.Setattr returned errno %v, want 0", errno) + } + + // fillAttr must report the POSIX symlink mode (0o777), not the + // caller-supplied value, because the request is not stored. 
+ if got := out.Attr.Mode & 0o777; got != 0o777 { + t.Fatalf("Symlink mode = 0o%o, want 0o777", got) + } +} + +// TestStatfsReportsSpace verifies that Dir.Statfs proxies the +// disk-space statistics of the repo's backing filesystem, and that an +// empty RepoPath produces zeroed (but successful) results. +func TestStatfsReportsSpace(t *testing.T) { + t.Run("matches repo filesystem", func(t *testing.T) { + dir := t.TempDir() + d := &Dir{Cfg: &Config{RepoPath: dir}} + out := &fuse.StatfsOut{} + if errno := d.Statfs(t.Context(), out); errno != 0 { + t.Fatalf("Statfs returned errno %v, want 0", errno) + } + + // Verify we got real filesystem data (non-zero) and that + // free blocks don't exceed total blocks. Exact comparison + // against a second syscall.Statfs call is racy because CI + // writes can change block counts between the two calls. + if out.Blocks == 0 { + t.Fatal("Blocks = 0, expected non-zero for a real filesystem") + } + if out.Bfree > out.Blocks { + t.Fatalf("Bfree (%d) > Blocks (%d)", out.Bfree, out.Blocks) + } + }) + + t.Run("empty repo path", func(t *testing.T) { + d := &Dir{Cfg: &Config{}} + out := &fuse.StatfsOut{} + if errno := d.Statfs(t.Context(), out); errno != 0 { + t.Fatalf("Statfs returned errno %v, want 0", errno) + } + if out.Blocks != 0 { + t.Fatalf("expected zeroed Blocks when RepoPath is empty, got %d", out.Blocks) + } + }) +} diff --git a/go.mod b/go.mod index cfffd2b69b4..8792ae79177 100644 --- a/go.mod +++ b/go.mod @@ -1,9 +1,8 @@ module github.com/ipfs/kubo -go 1.25.0 +go 1.26.2 require ( - bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc contrib.go.opencensus.io/exporter/prometheus v0.4.2 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 github.com/blang/semver/v4 v4.0.0 @@ -18,12 +17,13 @@ require ( github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 github.com/fsnotify/fsnotify v1.9.0 github.com/google/uuid v1.6.0 - github.com/hashicorp/go-version v1.8.0 + github.com/hanwen/go-fuse/v2 
v2.9.1-0.20260323175136-8b5aa92e8e7c + github.com/hashicorp/go-version v1.9.0 github.com/ipfs-shipyard/nopfs v0.0.14 github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 - github.com/ipfs/boxo v0.37.0 + github.com/ipfs/boxo v0.38.0 github.com/ipfs/go-block-format v0.2.3 - github.com/ipfs/go-cid v0.6.0 + github.com/ipfs/go-cid v0.6.1 github.com/ipfs/go-cidutil v0.1.1 github.com/ipfs/go-datastore v0.9.1 github.com/ipfs/go-detect-race v0.0.1 @@ -31,17 +31,17 @@ require ( github.com/ipfs/go-ds-flatfs v0.6.0 github.com/ipfs/go-ds-leveldb v0.5.2 github.com/ipfs/go-ds-measure v0.2.2 - github.com/ipfs/go-ds-pebble v0.5.9 + github.com/ipfs/go-ds-pebble v0.5.10 github.com/ipfs/go-fs-lock v0.1.1 github.com/ipfs/go-ipfs-cmds v0.16.0 github.com/ipfs/go-ipld-cbor v0.2.1 github.com/ipfs/go-ipld-format v0.6.3 github.com/ipfs/go-ipld-git v0.1.1 - github.com/ipfs/go-ipld-legacy v0.2.2 + github.com/ipfs/go-ipld-legacy v0.3.0 github.com/ipfs/go-log/v2 v2.9.1 github.com/ipfs/go-metrics-interface v0.3.0 github.com/ipfs/go-metrics-prometheus v0.1.0 - github.com/ipfs/go-test v0.2.3 + github.com/ipfs/go-test v0.3.0 github.com/ipfs/go-unixfsnode v1.10.3 github.com/ipld/go-car/v2 v2.16.0 github.com/ipld/go-codec-dagpb v1.7.0 @@ -50,9 +50,9 @@ require ( github.com/jbenet/go-temp-err-catcher v0.1.0 github.com/julienschmidt/httprouter v1.3.0 github.com/libp2p/go-doh-resolver v0.5.0 - github.com/libp2p/go-libp2p v0.47.0 + github.com/libp2p/go-libp2p v0.48.0 github.com/libp2p/go-libp2p-http v0.5.0 - github.com/libp2p/go-libp2p-kad-dht v0.38.0 + github.com/libp2p/go-libp2p-kad-dht v0.39.0 github.com/libp2p/go-libp2p-kbucket v0.8.0 github.com/libp2p/go-libp2p-pubsub v0.15.0 github.com/libp2p/go-libp2p-pubsub-router v0.6.0 @@ -63,7 +63,7 @@ require ( github.com/miekg/dns v1.1.72 github.com/multiformats/go-multiaddr v0.16.1 github.com/multiformats/go-multiaddr-dns v0.5.0 - github.com/multiformats/go-multibase v0.2.0 + github.com/multiformats/go-multibase v0.3.0 github.com/multiformats/go-multicodec v0.10.0 
github.com/multiformats/go-multihash v0.2.3 github.com/opentracing/opentracing-go v1.2.0 @@ -77,25 +77,27 @@ require ( github.com/whyrusleeping/go-sysinfo v0.0.0-20190219211824-4a357d4b90b1 github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 go.opencensus.io v0.24.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 go.opentelemetry.io/contrib/propagators/autoprop v0.46.1 - go.opentelemetry.io/otel v1.40.0 + go.opentelemetry.io/otel v1.42.0 go.opentelemetry.io/otel/exporters/prometheus v0.56.0 - go.opentelemetry.io/otel/sdk v1.40.0 - go.opentelemetry.io/otel/sdk/metric v1.40.0 - go.opentelemetry.io/otel/trace v1.40.0 + go.opentelemetry.io/otel/sdk v1.42.0 + go.opentelemetry.io/otel/sdk/metric v1.42.0 + go.opentelemetry.io/otel/trace v1.42.0 go.uber.org/dig v1.19.0 go.uber.org/fx v1.24.0 go.uber.org/zap v1.27.1 - golang.org/x/crypto v0.48.0 - golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a - golang.org/x/mod v0.33.0 - golang.org/x/sync v0.19.0 - golang.org/x/sys v0.41.0 + golang.org/x/crypto v0.50.0 + golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 + golang.org/x/mod v0.34.0 + golang.org/x/sync v0.20.0 + golang.org/x/sys v0.43.0 google.golang.org/protobuf v1.36.11 ) require ( + filippo.io/bigmod v0.1.1-0.20260103110540-f8a47775ebe5 // indirect + filippo.io/keygen v0.0.0-20260114151900-8e2790ea4c5b // indirect github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect github.com/DataDog/zstd v1.5.7 // indirect github.com/Jorropo/jsync v1.0.1 // indirect @@ -119,7 +121,7 @@ require ( github.com/cskr/pubsub v1.0.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.1 // indirect 
github.com/dgraph-io/badger v1.6.2 // indirect github.com/dgraph-io/ristretto v0.0.2 // indirect github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect @@ -144,12 +146,12 @@ require ( github.com/google/gopacket v1.1.19 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.3 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect github.com/guillaumemichel/reservedpool v0.3.0 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/huin/goupnp v1.3.0 // indirect - github.com/ipfs/bbloom v0.0.4 // indirect + github.com/ipfs/bbloom v0.1.0 // indirect github.com/ipfs/go-bitfield v1.1.0 // indirect github.com/ipfs/go-dsqueue v0.2.0 // indirect github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect @@ -185,7 +187,7 @@ require ( github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882 // indirect github.com/minio/sha256-simd v1.0.1 // indirect - github.com/mr-tron/base58 v1.2.0 // indirect + github.com/mr-tron/base58 v1.3.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect @@ -195,21 +197,18 @@ require ( github.com/onsi/gomega v1.36.3 // indirect github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect github.com/pion/datachannel v1.5.10 // indirect - github.com/pion/dtls/v2 v2.2.12 // indirect - github.com/pion/dtls/v3 v3.1.1 // indirect + github.com/pion/dtls/v3 v3.1.2 // indirect github.com/pion/ice/v4 v4.0.10 // indirect github.com/pion/interceptor v0.1.40 // indirect github.com/pion/logging v0.2.4 // indirect github.com/pion/mdns/v2 v2.0.7 // indirect github.com/pion/randutil v0.1.0 // indirect - github.com/pion/rtcp v1.2.15 // indirect + 
github.com/pion/rtcp v1.2.16 // indirect github.com/pion/rtp v1.8.19 // indirect github.com/pion/sctp v1.8.39 // indirect - github.com/pion/sdp/v3 v3.0.13 // indirect + github.com/pion/sdp/v3 v3.0.18 // indirect github.com/pion/srtp/v3 v3.0.6 // indirect - github.com/pion/stun v0.6.1 // indirect - github.com/pion/stun/v3 v3.0.0 // indirect - github.com/pion/transport/v2 v2.2.10 // indirect + github.com/pion/stun/v3 v3.1.1 // indirect github.com/pion/transport/v3 v3.0.7 // indirect github.com/pion/transport/v4 v4.0.1 // indirect github.com/pion/turn/v4 v4.0.2 // indirect @@ -218,8 +217,8 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/polydawn/refmt v0.89.1-0.20231129105047-37766d95467a // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.66.1 // indirect - github.com/prometheus/procfs v0.17.0 // indirect + github.com/prometheus/common v0.67.5 // indirect + github.com/prometheus/procfs v0.20.1 // indirect github.com/prometheus/statsd_exporter v0.27.1 // indirect github.com/quic-go/qpack v0.6.0 // indirect github.com/quic-go/quic-go v0.59.0 // indirect @@ -245,29 +244,29 @@ require ( go.opentelemetry.io/contrib/propagators/b3 v1.21.1 // indirect go.opentelemetry.io/contrib/propagators/jaeger v1.21.1 // indirect go.opentelemetry.io/contrib/propagators/ot v1.21.1 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0 // indirect - go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.42.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.42.0 
// indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.42.0 // indirect + go.opentelemetry.io/otel/metric v1.42.0 // indirect go.opentelemetry.io/proto/otlp v1.9.0 // indirect go.uber.org/mock v0.5.2 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap/exp v0.3.0 // indirect - go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v2 v2.4.4 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect - golang.org/x/net v0.50.0 // indirect + golang.org/x/net v0.52.0 // indirect golang.org/x/oauth2 v0.35.0 // indirect - golang.org/x/telemetry v0.0.0-20260209163413-e7419c687ee4 // indirect - golang.org/x/term v0.40.0 // indirect - golang.org/x/text v0.34.0 // indirect + golang.org/x/telemetry v0.0.0-20260311193753-579e4da9a98c // indirect + golang.org/x/term v0.42.0 // indirect + golang.org/x/text v0.36.0 // indirect golang.org/x/time v0.12.0 // indirect - golang.org/x/tools v0.42.0 // indirect + golang.org/x/tools v0.43.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect gonum.org/v1/gonum v0.17.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect - google.golang.org/grpc v1.78.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 // indirect + google.golang.org/grpc v1.79.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.4.1 // indirect diff --git a/go.sum b/go.sum index 819906d270e..534da98b5f5 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,3 @@ -bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc h1:utDghgcjE8u+EBjHOgYT+dJPcnDF05KqWMBcjuJy510= -bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= cloud.google.com/go 
v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -41,6 +39,10 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg= contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/bigmod v0.1.1-0.20260103110540-f8a47775ebe5 h1:JA0fFr+kxpqTdxR9LOBiTWpGNchqmkcsgmdeJZRclZ0= +filippo.io/bigmod v0.1.1-0.20260103110540-f8a47775ebe5/go.mod h1:OjOXDNlClLblvXdwgFFOQFJEocLhhtai8vGLy0JCZlI= +filippo.io/keygen v0.0.0-20260114151900-8e2790ea4c5b h1:REI1FbdW71yO56Are4XAxD+OS/e+BQsB3gE4mZRQEXY= +filippo.io/keygen v0.0.0-20260114151900-8e2790ea4c5b/go.mod h1:9nnw1SlYHYuPSo/3wjQzNjSbeHlq2NsKo5iEtfJPWP0= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= @@ -99,6 +101,8 @@ github.com/caddyserver/certmagic v0.23.0 h1:CfpZ/50jMfG4+1J/u2LV6piJq4HOfO6ppOnO github.com/caddyserver/certmagic v0.23.0/go.mod h1:9mEZIWqqWoI+Gf+4Trh04MOVPD0tGSxtqsxg87hAIH4= github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+YTAyA= github.com/caddyserver/zerossl v0.1.3/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4= +github.com/canonical/go-sp800.90a-drbg v0.0.0-20210314144037-6eeb1040d6c3 h1:oe6fCvaEpkhyW3qAicT0TnGtyht/UrgvOwMcEgLb7Aw= +github.com/canonical/go-sp800.90a-drbg 
v0.0.0-20210314144037-6eeb1040d6c3/go.mod h1:qdP0gaj0QtgX2RUZhnlVrceJ+Qln8aSlDyJwelLLFeM= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= @@ -163,8 +167,8 @@ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.1 h1:5RVFMOWjMyRy8cARdy79nAmgYw3hK/4HUq48LQ6Wwqo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.1/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= @@ -342,12 +346,14 @@ github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWS github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod 
h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c= github.com/guillaumemichel/reservedpool v0.3.0 h1:eqqO/QvTllLBrit7LVtVJBqw4cD0WdV9ajUe7WNTajw= github.com/guillaumemichel/reservedpool v0.3.0/go.mod h1:sXSDIaef81TFdAJglsCFCMfgF5E5Z5xK1tFhjDhvbUc= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/hanwen/go-fuse/v2 v2.9.1-0.20260323175136-8b5aa92e8e7c h1:m4bneA0dtaIhyTOJZCvcka670ZwDEiSomj5EARK1Jxc= +github.com/hanwen/go-fuse/v2 v2.9.1-0.20260323175136-8b5aa92e8e7c/go.mod h1:yE6D2PqWwm3CbYRxFXV9xUd8Md5d6NG0WBs5spCswmI= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -360,8 +366,8 @@ github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerX github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4= -github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.9.0 h1:CeOIz6k+LoN3qX9Z0tyQrPtiB1DFYRPfCIBtaXPSCnA= +github.com/hashicorp/go-version v1.9.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= 
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -384,10 +390,10 @@ github.com/ipfs-shipyard/nopfs v0.0.14 h1:HFepJt/MxhZ3/GsLZkkAPzIPdNYKaLO1Qb7YmP github.com/ipfs-shipyard/nopfs v0.0.14/go.mod h1:mQyd0BElYI2gB/kq/Oue97obP4B3os4eBmgfPZ+hnrE= github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 h1:OqNqsGZPX8zh3eFMO8Lf8EHRRnSGBMqcdHUd7SDsUOY= github.com/ipfs-shipyard/nopfs/ipfs v0.25.0/go.mod h1:BxhUdtBgOXg1B+gAPEplkg/GpyTZY+kCMSfsJvvydqU= -github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= -github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= -github.com/ipfs/boxo v0.37.0 h1:2E3mZvydMI2t5IkAgtkmZ3sGsld0oS7o3I+xyzDk6uI= -github.com/ipfs/boxo v0.37.0/go.mod h1:8yyiRn54F2CsW13n0zwXEPrVsZix/gFj9SYIRYMZ6KE= +github.com/ipfs/bbloom v0.1.0 h1:nIWwfIE3AaG7RCDQIsrUonGCOTp7qSXzxH7ab/ss964= +github.com/ipfs/bbloom v0.1.0/go.mod h1:lDy3A3i6ndgEW2z1CaRFvDi5/ZTzgM1IxA/pkL7Wgts= +github.com/ipfs/boxo v0.38.0 h1:Kt/swuNXAtVXs7EP6KEjB5+2lo5/tTrvWzjakQ8IiOo= +github.com/ipfs/boxo v0.38.0/go.mod h1:A6DRpImSXihx6MiEHOeBjXqleDqK5JX3yWDxM0WygPo= github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU= github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= @@ -396,8 +402,8 @@ github.com/ipfs/go-block-format v0.2.3/go.mod h1:WJaQmPAKhD3LspLixqlqNFxiZ3BZ3xg github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/ipfs/go-cid v0.6.0 h1:DlOReBV1xhHBhhfy/gBNNTSyfOM6rLiIx9J7A4DGf30= -github.com/ipfs/go-cid v0.6.0/go.mod 
h1:NC4kS1LZjzfhK40UGmpXv5/qD2kcMzACYJNntCUiDhQ= +github.com/ipfs/go-cid v0.6.1 h1:T5TnNb08+ueovG76Z5gx1L4Y7QOaGTXHg1F6raWFxIc= +github.com/ipfs/go-cid v0.6.1/go.mod h1:zrY0SwOhjrrIdfPQ/kf+k1sXyJ0QE7cMxfCployLBs0= github.com/ipfs/go-cidutil v0.1.1 h1:COuby6H8C2ml0alvHYX3WdbFM4F07YtbY0UlT5j+sgI= github.com/ipfs/go-cidutil v0.1.1/go.mod h1:SCoUftGEUgoXe5Hjeyw5CiLZF8cwYn/TbtpFQXJCP6k= github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= @@ -416,8 +422,8 @@ github.com/ipfs/go-ds-leveldb v0.5.2 h1:6nmxlQ2zbp4LCNdJVsmHfs9GP0eylfBNxpmY1csp github.com/ipfs/go-ds-leveldb v0.5.2/go.mod h1:2fAwmcvD3WoRT72PzEekHBkQmBDhc39DJGoREiuGmYo= github.com/ipfs/go-ds-measure v0.2.2 h1:4kwvBGbbSXNYe4ANlg7qTIYoZU6mNlqzQHdVqICkqGI= github.com/ipfs/go-ds-measure v0.2.2/go.mod h1:b/87ak0jMgH9Ylt7oH0+XGy4P8jHx9KG09Qz+pOeTIs= -github.com/ipfs/go-ds-pebble v0.5.9 h1:D1FEuMxjbEmDADNqsyT74n9QHVAn12nv9i9Qa15AFYc= -github.com/ipfs/go-ds-pebble v0.5.9/go.mod h1:XmUBN05l6B+tMg7mpMS75ZcKW/CX01uZMhhWw85imQA= +github.com/ipfs/go-ds-pebble v0.5.10 h1:MsSPrq4ubtaWGaIvdE5+L227wEaoxs7nWEb6+lKojNE= +github.com/ipfs/go-ds-pebble v0.5.10/go.mod h1:ShbyLsills0WD9TJavOHu7uEDj/LwDW1WW91G4+W4X8= github.com/ipfs/go-dsqueue v0.2.0 h1:MBi9w3oSiX98Xc+Y7NuJ9G8MI6mAT4IGdO9dHEMCZzU= github.com/ipfs/go-dsqueue v0.2.0/go.mod h1:8FfNQC4DMF/KkzBXRNB9Rb3MKDW0Sh98HMtXYl1mLQE= github.com/ipfs/go-fs-lock v0.1.1 h1:TecsP/Uc7WqYYatasreZQiP9EGRy4ZnKoG4yXxR33nw= @@ -441,8 +447,8 @@ github.com/ipfs/go-ipld-format v0.6.3 h1:9/lurLDTotJpZSuL++gh3sTdmcFhVkCwsgx2+rA github.com/ipfs/go-ipld-format v0.6.3/go.mod h1:74ilVN12NXVMIV+SrBAyC05UJRk0jVvGqdmrcYZvCBk= github.com/ipfs/go-ipld-git v0.1.1 h1:TWGnZjS0htmEmlMFEkA3ogrNCqWjIxwr16x1OsdhG+Y= github.com/ipfs/go-ipld-git v0.1.1/go.mod h1:+VyMqF5lMcJh4rwEppV0e6g4nCCHXThLYYDpKUkJubI= -github.com/ipfs/go-ipld-legacy v0.2.2 h1:DThbqCPVLpWBcGtU23KDLiY2YRZZnTkXQyfz8aOfBkQ= -github.com/ipfs/go-ipld-legacy v0.2.2/go.mod 
h1:hhkj+b3kG9b2BcUNw8IFYAsfeNo8E3U7eYlWeAOPyDU= +github.com/ipfs/go-ipld-legacy v0.3.0 h1:7XhFKkRyCvP5upOlQfKUFIqL3S5DEZnbUE4bQmQ/tNE= +github.com/ipfs/go-ipld-legacy v0.3.0/go.mod h1:Ukef9ARQiX+RVetwH2XiReLgJvQDEXcUPszrZ1KRjKI= github.com/ipfs/go-libdht v0.5.0 h1:ZN+eCqwahZvUeT0e4DsIxRtm78Mc9UR5tmZUiMsrGjQ= github.com/ipfs/go-libdht v0.5.0/go.mod h1:L3YiuFXecLeZZFuuVRM0hjg1GgVhARzUdahFsuqSa7w= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= @@ -454,8 +460,8 @@ github.com/ipfs/go-metrics-prometheus v0.1.0 h1:bApWOHkrH3VTBHzTHrZSfq4n4weOZDzZ github.com/ipfs/go-metrics-prometheus v0.1.0/go.mod h1:2GtL525C/4yxtvSXpRJ4dnE45mCX9AS0XRa03vHx7G0= github.com/ipfs/go-peertaskqueue v0.8.3 h1:tBPpGJy+A92RqtRFq5amJn0Uuj8Pw8tXi0X3eHfHM8w= github.com/ipfs/go-peertaskqueue v0.8.3/go.mod h1:OqVync4kPOcXEGdj/LKvox9DCB5mkSBeXsPczCxLtYA= -github.com/ipfs/go-test v0.2.3 h1:Z/jXNAReQFtCYyn7bsv/ZqUwS6E7iIcSpJ2CuzCvnrc= -github.com/ipfs/go-test v0.2.3/go.mod h1:QW8vSKkwYvWFwIZQLGQXdkt9Ud76eQXRQ9Ao2H+cA1o= +github.com/ipfs/go-test v0.3.0 h1:0Y4Uve3tp9HI+2lIJjfOliOrOgv/YpXg/l1y3P4DEYE= +github.com/ipfs/go-test v0.3.0/go.mod h1:JK+U8pRpATZb7lsYNSJlCj3WYB3cFfWIbI6nWRM/GFk= github.com/ipfs/go-unixfsnode v1.10.3 h1:c8sJjuGNkxXAQH75P+f5ngPda/9T+DrboVA0TcDGvGI= github.com/ipfs/go-unixfsnode v1.10.3/go.mod h1:2Jlc7DoEwr12W+7l8Hr6C7XF4NHST3gIkqSArLhGSxU= github.com/ipld/go-car/v2 v2.16.0 h1:LWe0vmN/QcQmUU4tr34W5Nv5mNraW+G6jfN2s+ndBco= @@ -533,8 +539,8 @@ github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZ github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784= github.com/libp2p/go-flow-metrics v0.3.0/go.mod h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo= -github.com/libp2p/go-libp2p v0.47.0 h1:qQpBjSCWNQFF0hjBbKirMXE9RHLtSuzTDkTfr1rw0yc= -github.com/libp2p/go-libp2p v0.47.0/go.mod 
h1:s8HPh7mMV933OtXzONaGFseCg/BE//m1V34p3x4EUOY= +github.com/libp2p/go-libp2p v0.48.0 h1:h2BrLAgrj7X8bEN05K7qmrjpNHYA+6tnsGRdprjTnvo= +github.com/libp2p/go-libp2p v0.48.0/go.mod h1:Q1fBZNdmC2Hf82husCTfkKJVfHm2we5zk+NWmOGEmWk= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= @@ -543,8 +549,8 @@ github.com/libp2p/go-libp2p-gostream v0.6.0 h1:QfAiWeQRce6pqnYfmIVWJFXNdDyfiR/qk github.com/libp2p/go-libp2p-gostream v0.6.0/go.mod h1:Nywu0gYZwfj7Jc91PQvbGU8dIpqbQQkjWgDuOrFaRdA= github.com/libp2p/go-libp2p-http v0.5.0 h1:+x0AbLaUuLBArHubbbNRTsgWz0RjNTy6DJLOxQ3/QBc= github.com/libp2p/go-libp2p-http v0.5.0/go.mod h1:glh87nZ35XCQyFsdzZps6+F4HYI6DctVFY5u1fehwSg= -github.com/libp2p/go-libp2p-kad-dht v0.38.0 h1:NToFzwvICo6ghDfSwuTmROCtl9LDXSZT1VawEbm4NUs= -github.com/libp2p/go-libp2p-kad-dht v0.38.0/go.mod h1:g/CefQilAnCMyUH52A6tUGbe17NgQ8q26MaZCA968iI= +github.com/libp2p/go-libp2p-kad-dht v0.39.0 h1:mww38eBYiUvdsu+Xl/GLlBC0Aa8M+5HAwvafkFOygAM= +github.com/libp2p/go-libp2p-kad-dht v0.39.0/go.mod h1:Po2JugFEkDq9Vig/JXtc153ntOi0q58o4j7IuITCOVs= github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio= github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s= github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4= @@ -632,6 +638,8 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= 
+github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= +github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -640,8 +648,9 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.3.0 h1:K6Y13R2h+dku0wOqKtecgRnBUBPrZzLZy5aIj8lCcJI= +github.com/mr-tron/base58 v1.3.0/go.mod h1:2BuubE67DCSWwVfx37JWNG8emOC0sHEU4/HpcYgCLX8= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= @@ -660,8 +669,8 @@ github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDu github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= -github.com/multiformats/go-multibase v0.2.0 
h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= -github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= +github.com/multiformats/go-multibase v0.3.0 h1:8helZD2+4Db7NNWFiktk2NePbF0boolBe6bDQvM4r68= +github.com/multiformats/go-multibase v0.3.0/go.mod h1:MoBLQPCkRTOL3eveIPO81860j2AQY8JwcnNlRkGRUfI= github.com/multiformats/go-multicodec v0.10.0 h1:UpP223cig/Cx8J76jWt91njpK3GTAO1w02sdcjZDSuc= github.com/multiformats/go-multicodec v0.10.0/go.mod h1:wg88pM+s2kZJEQfRCKBNU+g32F5aWBEjyFHXvZLTcLI= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= @@ -716,40 +725,30 @@ github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4 github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o= github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M= -github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= -github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= -github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/dtls/v3 v3.1.1 h1:wSLMam9Kf7DL1A74hnqRvEb9OT+aXPAsQ5VS+BdXOJ0= -github.com/pion/dtls/v3 v3.1.1/go.mod h1:7FGvVYpHsUV6+aywaFpG7aE4Vz8nBOx74odPRFue6cI= +github.com/pion/dtls/v3 v3.1.2 h1:gqEdOUXLtCGW+afsBLO0LtDD8GnuBBjEy6HRtyofZTc= +github.com/pion/dtls/v3 v3.1.2/go.mod h1:Hw/igcX4pdY69z1Hgv5x7wJFrUkdgHwAn/Q/uo7YHRo= github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4= github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw= github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4= github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic= -github.com/pion/logging v0.2.2/go.mod 
h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= github.com/pion/logging v0.2.4 h1:tTew+7cmQ+Mc1pTBLKH2puKsOvhm32dROumOZ655zB8= github.com/pion/logging v0.2.4/go.mod h1:DffhXTKYdNZU+KtJ5pyQDjvOAh/GsNSyv1lbkFbe3so= github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM= github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA= github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= -github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo= -github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0= +github.com/pion/rtcp v1.2.16 h1:fk1B1dNW4hsI78XUCljZJlC4kZOPk67mNRuQ0fcEkSo= +github.com/pion/rtcp v1.2.16/go.mod h1:/as7VKfYbs5NIb4h6muQ35kQF/J0ZVNz2Z3xKoCBYOo= github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c= github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk= github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE= github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE= -github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4= -github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E= +github.com/pion/sdp/v3 v3.0.18 h1:l0bAXazKHpepazVdp+tPYnrsy9dfh7ZbT8DxesH5ZnI= +github.com/pion/sdp/v3 v3.0.18/go.mod h1:ZREGo6A9ZygQ9XkqAj5xYCQtQpif0i6Pa81HOiAdqQ8= github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4= github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY= -github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= -github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= -github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw= -github.com/pion/stun/v3 v3.0.0/go.mod 
h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU= -github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= -github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= -github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= -github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= +github.com/pion/stun/v3 v3.1.1 h1:CkQxveJ4xGQjulGSROXbXq94TAWu8gIX2dT+ePhUkqw= +github.com/pion/stun/v3 v3.1.1/go.mod h1:qC1DfmcCTQjl9PBaMa5wSn3x9IPmKxSdcCsxBcDBndM= github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= github.com/pion/transport/v4 v4.0.1 h1:sdROELU6BZ63Ab7FrOLn13M6YdJLY20wldXW2Cu2k8o= @@ -794,16 +793,16 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9 github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= -github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4= +github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs 
v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= -github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/prometheus/procfs v0.20.1 h1:XwbrGOIplXW/AU3YhIhLODXMJYyC1isLFfYCsTEycfc= +github.com/prometheus/procfs v0.20.1/go.mod h1:o9EMBZGRyvDrSPH1RqdxhojkuXstoe4UlK79eF5TGGo= github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= github.com/prometheus/statsd_exporter v0.27.1 h1:tcRJOmwlA83HPfWzosAgr2+zEN5XDFv+M2mn/uYkn5Y= github.com/prometheus/statsd_exporter v0.27.1/go.mod h1:vA6ryDfsN7py/3JApEst6nLTJboq66XsNcJGNmC88NQ= @@ -878,7 +877,6 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= @@ -900,8 +898,6 @@ github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= -github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= 
-github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb h1:Ywfo8sUltxogBpFuMOFRrrSifO788kAFxmvVw31PtQQ= github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= @@ -927,7 +923,6 @@ github.com/whyrusleeping/go-sysinfo v0.0.0-20190219211824-4a357d4b90b1 h1:ctS9An github.com/whyrusleeping/go-sysinfo v0.0.0-20190219211824-4a357d4b90b1/go.mod h1:tKH72zYNt/exx6/5IQO6L9LoQ0rEjd5SbbWaDTs9Zso= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= -github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= @@ -959,8 +954,8 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 
h1:OyrsyzuttWTSur2qN/Lm0m2a8yqyIjUVBZcxFPuXq2o= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0/go.mod h1:C2NGBr+kAB4bk3xtMXfZ94gqFDtg/GkI7e9zqGh5Beg= go.opentelemetry.io/contrib/propagators/autoprop v0.46.1 h1:cXTYcMjY0dsYokAuo8LbNBQxpF8VgTHdiHJJ1zlIXl4= go.opentelemetry.io/contrib/propagators/autoprop v0.46.1/go.mod h1:WZxgny1/6+j67B1s72PLJ4bGjidoWFzSmLNfJKVt2bo= go.opentelemetry.io/contrib/propagators/aws v1.21.1 h1:uQIQIDWb0gzyvon2ICnghpLAf9w7ADOCUiIiwCQgR2o= @@ -971,26 +966,26 @@ go.opentelemetry.io/contrib/propagators/jaeger v1.21.1 h1:f4beMGDKiVzg9IcX7/VuWV go.opentelemetry.io/contrib/propagators/jaeger v1.21.1/go.mod h1:U9jhkEl8d1LL+QXY7q3kneJWJugiN3kZJV2OWz3hkBY= go.opentelemetry.io/contrib/propagators/ot v1.21.1 h1:3TN5vkXjKYWp0YdMcnUEC/A+pBPvqz9V3nCS2xmcurk= go.opentelemetry.io/contrib/propagators/ot v1.21.1/go.mod h1:oy0MYCbS/b3cqUDW37wBWtlwBIsutngS++Lklpgh+fc= -go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= -go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 h1:DvJDOPmSWQHWywQS6lKL+pb8s3gBLOZUtw4N+mavW1I= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0/go.mod h1:EtekO9DEJb4/jRyN4v4Qjc2yA7AtfCBuz2FynRUWTXs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 h1:wVZXIWjQSeSmMoxF74LzAnpVQOAFDo3pPji9Y4SOFKc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0/go.mod h1:khvBS2IggMFNwZK/6lEeHg/W57h/IX6J4URh57fuI40= +go.opentelemetry.io/otel v1.42.0 h1:lSQGzTgVR3+sgJDAU/7/ZMjN9Z+vUip7leaqBKy4sho= +go.opentelemetry.io/otel v1.42.0/go.mod h1:lJNsdRMxCUIWuMlVJWzecSMuNjE7dOYyWlqOXWkdqCc= 
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0 h1:THuZiwpQZuHPul65w4WcwEnkX2QIuMT+UFoOrygtoJw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0/go.mod h1:J2pvYM5NGHofZ2/Ru6zw/TNWnEQp5crgyDeSrYpXkAw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.42.0 h1:zWWrB1U6nqhS/k6zYB74CjRpuiitRtLLi68VcgmOEto= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.42.0/go.mod h1:2qXPNBX1OVRC0IwOnfo1ljoid+RD0QK3443EaqVlsOU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.42.0 h1:uLXP+3mghfMf7XmV4PkGfFhFKuNWoCvvx5wP/wOXo0o= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.42.0/go.mod h1:v0Tj04armyT59mnURNUJf7RCKcKzq+lgJs6QSjHjaTc= go.opentelemetry.io/otel/exporters/prometheus v0.56.0 h1:GnCIi0QyG0yy2MrJLzVrIM7laaJstj//flf1zEJCG+E= go.opentelemetry.io/otel/exporters/prometheus v0.56.0/go.mod h1:JQcVZtbIIPM+7SWBB+T6FK+xunlyidwLp++fN0sUaOk= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0 h1:MzfofMZN8ulNqobCmCAVbqVL5syHw+eB2qPRkCMA/fQ= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0/go.mod h1:E73G9UFtKRXrxhBsHtG00TB5WxX57lpsQzogDkqBTz8= -go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= -go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= -go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= -go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= -go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= -go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= -go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= -go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.42.0 
h1:s/1iRkCKDfhlh1JF26knRneorus8aOwVIDhvYx9WoDw= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.42.0/go.mod h1:UI3wi0FXg1Pofb8ZBiBLhtMzgoTm1TYkMvn71fAqDzs= +go.opentelemetry.io/otel/metric v1.42.0 h1:2jXG+3oZLNXEPfNmnpxKDeZsFI5o4J+nz6xUlaFdF/4= +go.opentelemetry.io/otel/metric v1.42.0/go.mod h1:RlUN/7vTU7Ao/diDkEpQpnz3/92J9ko05BIwxYa2SSI= +go.opentelemetry.io/otel/sdk v1.42.0 h1:LyC8+jqk6UJwdrI/8VydAq/hvkFKNHZVIWuslJXYsDo= +go.opentelemetry.io/otel/sdk v1.42.0/go.mod h1:rGHCAxd9DAph0joO4W6OPwxjNTYWghRWmkHuGbayMts= +go.opentelemetry.io/otel/sdk/metric v1.42.0 h1:D/1QR46Clz6ajyZ3G8SgNlTJKBdGp84q9RKCAZ3YGuA= +go.opentelemetry.io/otel/sdk/metric v1.42.0/go.mod h1:Ua6AAlDKdZ7tdvaQKfSmnFTdHx37+J4ba8MwVCYM5hc= +go.opentelemetry.io/otel/trace v1.42.0 h1:OUCgIPt+mzOnaUTpOQcBiM/PLQ/Op7oq6g4LenLmOYY= +go.opentelemetry.io/otel/trace v1.42.0/go.mod h1:f3K9S+IFqnumBkKhRJMeaZeNk9epyhnCmQh/EysQCdc= go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -1010,8 +1005,8 @@ go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U= go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ= -go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= -go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v2 v2.4.4 h1:tuyd0P+2Ont/d6e2rl3be67goVK4R6deVxCUX5vyPaQ= +go.yaml.in/yaml/v2 v2.4.4/go.mod h1:gMZqIpDtDqOfM0uNfy0SkpRhvUryYH0Z6wdMYcacYXQ= go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc= go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU= 
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1031,11 +1026,8 @@ golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= -golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= +golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI= +golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1046,8 +1038,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a h1:ovFr6Z0MNmU7nH8VaX5xqw+05ST2uO1exVfZPVqRC5o= -golang.org/x/exp 
v0.0.0-20260212183809-81e46e3db34a/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA= +golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 h1:jiDhWWeC7jfWqR9c/uplMOqJ0sbNlNWv0UkzE0vX1MA= +golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90/go.mod h1:xE1HEv6b+1SCZ5/uscMRjUBKtIxworgEcEi+/n9NQDQ= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1076,8 +1068,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= +golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1129,12 +1121,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net 
v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60= -golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM= +golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= +golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1165,8 +1153,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= -golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1192,7 +1180,6 
@@ golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1242,25 +1229,17 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= -golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/telemetry v0.0.0-20260209163413-e7419c687ee4 h1:bTLqdHv7xrGlFbvf5/TXNxy/iUwwdkjhqQTJDjW7aj0= -golang.org/x/telemetry v0.0.0-20260209163413-e7419c687ee4/go.mod h1:g5NllXBEermZrmR51cJDQxmJUHUOfRAaNyWBM+R+548= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys 
v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/telemetry v0.0.0-20260311193753-579e4da9a98c h1:6a8FdnNk6bTXBjR4AGKFgUKuo+7GnR3FX5L7CbveeZc= +golang.org/x/telemetry v0.0.0-20260311193753-579e4da9a98c/go.mod h1:TpUTTEp9frx7rTdLpC9gFG9kdI7zVLFTFFlqaH2Cncw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg= -golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= +golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY= +golang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1272,11 +1251,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text 
v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= -golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= +golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg= +golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1341,8 +1317,8 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= -golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k= -golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0= +golang.org/x/tools v0.43.0 h1:12BdW9CeB3Z+J/I/wj34VMl8X+fEXBxVR90JeMX5E7s= +golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1422,10 +1398,10 @@ google.golang.org/genproto 
v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M= -google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= +google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 h1:JLQynH/LBHfCTSbDWl+py8C+Rg/k1OVH3xfcaiANuF0= +google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:kSJwQxqmFXeo79zOmbrALdflXQeAYcUbgS7PbpMknCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 h1:mWPCjDEyshlQYzBpMNHaEof6UX1PmHcaUODUywQ0uac= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1446,8 +1422,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc 
v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= -google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= +google.golang.org/grpc v1.79.2 h1:fRMD94s2tITpyJGtBBn7MkMseNpOZU8ZxgC3MMBaXRU= +google.golang.org/grpc v1.79.2/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/internal/fusemount/context.go b/internal/fusemount/context.go new file mode 100644 index 00000000000..a51ffa26bdc --- /dev/null +++ b/internal/fusemount/context.go @@ -0,0 +1,31 @@ +// Package fusemount provides internal helpers shared between the FUSE +// mount layer and the core API. It lives under internal/ so that +// external consumers of kubo cannot bypass publish guards. +package fusemount + +import "context" + +// publishKey is a context key that lets the IPNS FUSE mount's +// internal MFS republisher bypass the "cannot manually publish while +// IPNS is mounted" guard in the Name API. Without this bypass the +// guard blocks the mount's own publishes and silently drops IPNS +// updates, causing data written through the FUSE mount to be lost +// on daemon restart (see https://github.com/ipfs/kubo/issues/2168). +// +// TODO: the /ipns/ FUSE mount does not detect changes when a +// locally-owned key is published via `ipfs name publish` (RPC/CLI). +// A larger refactor is needed so the mountpoint's MFS representation +// is updated to reflect external publishes to locally-owned keys, +// rather than silently overwriting them on the next MFS flush. 
+type publishKey struct{} + +// ContextWithPublish marks ctx as originating from the FUSE mount's +// internal publish path. +func ContextWithPublish(ctx context.Context) context.Context { + return context.WithValue(ctx, publishKey{}, true) +} + +// IsPublish reports whether ctx was marked by [ContextWithPublish]. +func IsPublish(ctx context.Context) bool { + return ctx.Value(publishKey{}) != nil +} diff --git a/mk/golang.mk b/mk/golang.mk index 53bf5fca23e..0551cb78622 100644 --- a/mk/golang.mk +++ b/mk/golang.mk @@ -66,9 +66,33 @@ TEST_CLI_TIMEOUT ?= 10m test_cli: cmd/ipfs/ipfs test/bin/gotestsum $$(DEPS_GO) mkdir -p test/cli rm -f test/cli/cli-tests.json - PATH="$(CURDIR)/cmd/ipfs:$(CURDIR)/test/bin:$$PATH" gotestsum $(GOTESTSUM_NOCOLOR) --jsonfile test/cli/cli-tests.json -- -v -timeout=$(TEST_CLI_TIMEOUT) ./test/cli/... ./test/integration/... ./client/rpc/... + TEST_FUSE=0 PATH="$(CURDIR)/cmd/ipfs:$(CURDIR)/test/bin:$$PATH" gotestsum $(GOTESTSUM_NOCOLOR) --jsonfile test/cli/cli-tests.json -- -v -timeout=$(TEST_CLI_TIMEOUT) ./test/cli/... ./test/integration/... ./client/rpc/... .PHONY: test_cli +# FUSE tests (requires /dev/fuse and fusermount in PATH) +# TEST_FUSE=1 makes mount failures fatal instead of skipping +# Keep this shorter than the CI job timeout so a hang trips Go's panic +# (and prints stack traces) instead of getting silently killed by CI. +TEST_FUSE_TIMEOUT ?= 4m + +# FUSE unit tests (./fuse/...) +test_fuse_unit: test/bin/gotestsum $$(DEPS_GO) + mkdir -p test/fuse + rm -f test/fuse/fuse-unit-tests.json + TEST_FUSE=1 gotestsum $(GOTESTSUM_NOCOLOR) --jsonfile test/fuse/fuse-unit-tests.json -- -v -timeout=$(TEST_FUSE_TIMEOUT) ./fuse/... 
+.PHONY: test_fuse_unit + +# FUSE CLI integration tests (test/cli/fuse/) +test_fuse_cli: cmd/ipfs/ipfs test/bin/gotestsum $$(DEPS_GO) + mkdir -p test/fuse + rm -f test/fuse/fuse-cli-tests.json + TEST_FUSE=1 PATH="$(CURDIR)/cmd/ipfs:$(CURDIR)/test/bin:$$PATH" gotestsum $(GOTESTSUM_NOCOLOR) --jsonfile test/fuse/fuse-cli-tests.json -- -v -timeout=$(TEST_FUSE_TIMEOUT) ./test/cli/fuse/... +.PHONY: test_fuse_cli + +# Combined: run all FUSE tests +test_fuse: test_fuse_unit test_fuse_cli +.PHONY: test_fuse + # Example tests (docs/examples/kubo-as-a-library) # Tests against both published and current kubo versions # Uses timeout to ensure CI gets output before job-level timeout kills everything diff --git a/plugin/loader/load_nocgo.go b/plugin/loader/load_nocgo.go index 3e0f393377d..231c12d35a5 100644 --- a/plugin/loader/load_nocgo.go +++ b/plugin/loader/load_nocgo.go @@ -1,4 +1,5 @@ -//go:build !cgo && !noplugin && (linux || darwin || freebsd) +// Plugin preloading without cgo (no dlopen, plugins are compiled in). +//go:build (linux || darwin || freebsd) && !cgo && !noplugin package loader diff --git a/plugin/loader/load_noplugin.go b/plugin/loader/load_noplugin.go index dddeac91d74..e598cd77447 100644 --- a/plugin/loader/load_noplugin.go +++ b/plugin/loader/load_noplugin.go @@ -1,3 +1,4 @@ +// No-op plugin loader when built with "go build -tags noplugin". //go:build noplugin package loader diff --git a/plugin/loader/load_unix.go b/plugin/loader/load_unix.go index 05af3019719..a11a3807dde 100644 --- a/plugin/loader/load_unix.go +++ b/plugin/loader/load_unix.go @@ -1,4 +1,5 @@ -//go:build cgo && !noplugin && (linux || darwin || freebsd) +// Plugin loading with cgo (uses dlopen to load .so plugins at runtime). 
+//go:build (linux || darwin || freebsd) && cgo && !noplugin package loader diff --git a/routing/delegated.go b/routing/delegated.go index 1c6d45ae1b4..e9266ba8f5a 100644 --- a/routing/delegated.go +++ b/routing/delegated.go @@ -160,12 +160,12 @@ func parse(visited map[string]bool, type ExtraHTTPParams struct { PeerID string - Addrs []string + AddrFunc func() []ma.Multiaddr // dynamic address resolver for provider records PrivKeyB64 string HTTPRetrieval bool } -func ConstructHTTPRouter(endpoint string, peerID string, addrs []string, privKey string, httpRetrieval bool) (routing.Routing, error) { +func ConstructHTTPRouter(endpoint string, peerID string, addrFunc func() []ma.Multiaddr, privKey string, httpRetrieval bool) (routing.Routing, error) { return httpRoutingFromConfig( config.Router{ Type: "http", @@ -175,7 +175,7 @@ func ConstructHTTPRouter(endpoint string, peerID string, addrs []string, privKey }, &ExtraHTTPParams{ PeerID: peerID, - Addrs: addrs, + AddrFunc: addrFunc, PrivKeyB64: privKey, HTTPRetrieval: httpRetrieval, }, @@ -226,21 +226,28 @@ func httpRoutingFromConfig(conf config.Router, extraHTTP *ExtraHTTPParams) (rout return nil, err } - addrInfo, err := createAddrInfo(extraHTTP.PeerID, extraHTTP.Addrs) + protocols := config.DefaultHTTPRoutersFilterProtocols + if extraHTTP.HTTPRetrieval { + protocols = append(protocols, "transport-ipfs-gateway-http") + } + + peerID, err := peer.Decode(extraHTTP.PeerID) if err != nil { return nil, err } - protocols := config.DefaultHTTPRoutersFilterProtocols - if extraHTTP.HTTPRetrieval { - protocols = append(protocols, "transport-ipfs-gateway-http") + var providerInfoOpt drclient.Option + if extraHTTP.AddrFunc != nil { + providerInfoOpt = drclient.WithProviderInfoFunc(peerID, extraHTTP.AddrFunc) + } else { + providerInfoOpt = drclient.WithProviderInfo(peerID, nil) } cli, err := drclient.New( params.Endpoint, drclient.WithHTTPClient(delegateHTTPClient), drclient.WithIdentity(key), - drclient.WithProviderInfo(addrInfo.ID, 
addrInfo.Addrs), + providerInfoOpt, drclient.WithUserAgent(version.GetUserAgentVersion()), drclient.WithProtocolFilter(protocols), drclient.WithStreamResultsRequired(), // https://specs.ipfs.tech/routing/http-routing-v1/#streaming @@ -278,28 +285,6 @@ func decodePrivKey(keyB64 string) (ic.PrivKey, error) { return ic.UnmarshalPrivateKey(pk) } -func createAddrInfo(peerID string, addrs []string) (peer.AddrInfo, error) { - pID, err := peer.Decode(peerID) - if err != nil { - return peer.AddrInfo{}, err - } - - var mas []ma.Multiaddr - for _, a := range addrs { - m, err := ma.NewMultiaddr(a) - if err != nil { - return peer.AddrInfo{}, err - } - - mas = append(mas, m) - } - - return peer.AddrInfo{ - ID: pID, - Addrs: mas, - }, nil -} - type ExtraDHTParams struct { BootstrapPeers []peer.AddrInfo Host host.Host diff --git a/test/cli/cid_base_test.go b/test/cli/cid_base_test.go new file mode 100644 index 00000000000..6ffd3d5075b --- /dev/null +++ b/test/cli/cid_base_test.go @@ -0,0 +1,217 @@ +package cli + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/require" +) + +// TestCidBase verifies that --cid-base is respected across commands +// and that CIDv0 is auto-upgraded to CIDv1 when a non-base58btc base +// is requested. +// +// Tests use base16 rather than base32 to avoid false positives if +// base32 ever becomes the default CID encoding. 
+func TestCidBase(t *testing.T) { + t.Parallel() + + const cidBaseFlag = "--cid-base=base16" + // base16 CIDv1 starts with "f01" (f = base16 multibase prefix) + const cidV1Prefix = "f01" + + makeDaemon := func(t *testing.T) *harness.Node { + t.Helper() + node := harness.NewT(t).NewNode().Init().StartDaemon("--offline") + t.Cleanup(func() { node.StopDaemon() }) + return node + } + + t.Run("add respects --cid-base", func(t *testing.T) { + t.Parallel() + node := makeDaemon(t) + + // ipfs add -q + cid := node.IPFSAddStr("test-add", cidBaseFlag) + require.True(t, strings.HasPrefix(cid, cidV1Prefix), "expected base16 CIDv1 from add, got %s", cid) + + // ipfs add -Q (quiet, only final CID) + cid = node.PipeStrToIPFS("test-add-Q", "add", "-Q", cidBaseFlag).Stdout.Trimmed() + require.True(t, strings.HasPrefix(cid, cidV1Prefix), "expected base16 CIDv1 from add -Q, got %s", cid) + }) + + t.Run("pin ls respects --cid-base", func(t *testing.T) { + t.Parallel() + node := makeDaemon(t) + + node.IPFSAddStr("pin-ls-test") + + lines := node.IPFS("pin", "ls", "-t", "recursive", cidBaseFlag).Stdout.Lines() + for _, line := range lines { + if line == "" { + continue + } + require.True(t, strings.HasPrefix(line, cidV1Prefix), "expected base16 CID in pin ls, got %s", line) + } + }) + + t.Run("dag import respects --cid-base", func(t *testing.T) { + t.Parallel() + node := makeDaemon(t) + + // Add content and export as CAR + cid := node.IPFSAddStr("dag-import-test", "--pin=false") + carData := node.IPFS("dag", "export", cid).Stdout.Bytes() + + // Import the CAR with --cid-base + out := node.PipeToIPFS(bytes.NewReader(carData), "dag", "import", cidBaseFlag).Stdout.Trimmed() + require.Contains(t, out, cidV1Prefix, "expected base16 CID in dag import output, got %s", out) + }) + + t.Run("block put returns base16 CIDv1", func(t *testing.T) { + t.Parallel() + node := makeDaemon(t) + cid := node.PipeStrToIPFS("hello", "block", "put", cidBaseFlag).Stdout.Trimmed() + require.True(t, 
strings.HasPrefix(cid, cidV1Prefix), "expected base16 CIDv1, got %s", cid) + }) + + t.Run("block put --format=v0 auto-upgrades to CIDv1 with --cid-base", func(t *testing.T) { + t.Parallel() + node := makeDaemon(t) + + // Without --cid-base: CIDv0 in base58btc + cidV0 := node.PipeStrToIPFS("hello", "block", "put", "--format=v0").Stdout.Trimmed() + require.True(t, strings.HasPrefix(cidV0, "Qm"), "expected CIDv0, got %s", cidV0) + + // With --cid-base: same content but displayed as CIDv1 + cidV1 := node.PipeStrToIPFS("hello", "block", "put", "--format=v0", cidBaseFlag).Stdout.Trimmed() + require.True(t, strings.HasPrefix(cidV1, cidV1Prefix), "expected base16 CIDv1, got %s", cidV1) + }) + + t.Run("block stat respects --cid-base", func(t *testing.T) { + t.Parallel() + node := makeDaemon(t) + + cidV0 := node.PipeStrToIPFS("test-block-stat", "block", "put", "--format=v0").Stdout.Trimmed() + require.True(t, strings.HasPrefix(cidV0, "Qm")) + + // block stat without --cid-base returns CIDv0 + stat := node.IPFS("block", "stat", cidV0).Stdout.Trimmed() + require.Contains(t, stat, cidV0) + + // block stat with --cid-base returns CIDv1 + stat = node.IPFS("block", "stat", cidBaseFlag, cidV0).Stdout.Trimmed() + require.NotContains(t, stat, cidV0, "should not contain CIDv0") + require.Contains(t, stat, cidV1Prefix, "should contain base16 CIDv1") + }) + + t.Run("block rm respects --cid-base", func(t *testing.T) { + t.Parallel() + node := makeDaemon(t) + + cidV0 := node.PipeStrToIPFS("test-block-rm", "block", "put", "--format=v0").Stdout.Trimmed() + require.True(t, strings.HasPrefix(cidV0, "Qm")) + + out := node.IPFS("block", "rm", cidBaseFlag, cidV0).Stdout.Trimmed() + require.Contains(t, out, cidV1Prefix, "removed block should be shown as base16 CIDv1") + require.NotContains(t, out, "Qm", "removed block should not contain CIDv0") + }) + + t.Run("dag stat respects --cid-base", func(t *testing.T) { + t.Parallel() + node := makeDaemon(t) + + // ipfs add creates dag-pb blocks with 
CIDv0 by default + cidV0 := node.IPFSAddStr("test-dag-stat", "--pin=false") + require.True(t, strings.HasPrefix(cidV0, "Qm")) + + // JSON output without --cid-base has CIDv0 + out := node.IPFS("dag", "stat", "--progress=false", "--enc=json", cidV0).Stdout.Trimmed() + var data struct { + DagStats []struct{ Cid string } `json:"DagStats"` + } + require.NoError(t, json.Unmarshal([]byte(out), &data)) + require.True(t, strings.HasPrefix(data.DagStats[0].Cid, "Qm")) + + // JSON output with --cid-base has CIDv1 + out = node.IPFS("dag", "stat", "--progress=false", "--enc=json", cidBaseFlag, cidV0).Stdout.Trimmed() + require.NoError(t, json.Unmarshal([]byte(out), &data)) + require.True(t, strings.HasPrefix(data.DagStats[0].Cid, cidV1Prefix), "expected base16 CIDv1 in dag stat, got %s", data.DagStats[0].Cid) + }) + + t.Run("object patch add-link respects --cid-base", func(t *testing.T) { + t.Parallel() + node := makeDaemon(t) + + // Parent must be a directory for add-link to work + node.IPFS("files", "mkdir", "/patch-add") + parent := node.IPFS("files", "stat", "--hash", "/patch-add").Stdout.Trimmed() + child := node.IPFSAddStr("child", "--pin=false") + + // Without --cid-base: CIDv0 + cidV0 := node.IPFS("object", "patch", "add-link", parent, "link", child).Stdout.Trimmed() + require.True(t, strings.HasPrefix(cidV0, "Qm"), "expected CIDv0, got %s", cidV0) + + // With --cid-base: CIDv1 + cidV1 := node.IPFS("object", "patch", "add-link", cidBaseFlag, parent, "link", child).Stdout.Trimmed() + require.True(t, strings.HasPrefix(cidV1, cidV1Prefix), "expected base16 CIDv1, got %s", cidV1) + }) + + t.Run("object patch rm-link respects --cid-base", func(t *testing.T) { + t.Parallel() + node := makeDaemon(t) + + node.IPFS("files", "mkdir", "/patch-rm") + parent := node.IPFS("files", "stat", "--hash", "/patch-rm").Stdout.Trimmed() + child := node.IPFSAddStr("child", "--pin=false") + + linked := node.IPFS("object", "patch", "add-link", parent, "link", child).Stdout.Trimmed() + + cidV1 
:= node.IPFS("object", "patch", "rm-link", cidBaseFlag, linked, "link").Stdout.Trimmed() + require.True(t, strings.HasPrefix(cidV1, cidV1Prefix), "expected base16 CIDv1, got %s", cidV1) + }) + + t.Run("refs local respects --cid-base", func(t *testing.T) { + t.Parallel() + node := makeDaemon(t) + + node.IPFSAddStr("refs-local-test", "--pin=false") + + lines := node.IPFS("refs", "local", cidBaseFlag).Stdout.Lines() + for _, line := range lines { + if line == "" { + continue + } + require.True(t, strings.HasPrefix(line, cidV1Prefix), "expected base16 CID, got %s", line) + } + }) + + t.Run("object diff respects --cid-base", func(t *testing.T) { + t.Parallel() + node := makeDaemon(t) + + cidA := node.IPFSAddStr("aaa", "--pin=false") + cidB := node.IPFSAddStr("bbb", "--pin=false") + + // Create two directories with different children + node.IPFS("files", "mkdir", "/diff-a") + node.IPFS("files", "cp", "/ipfs/"+cidA, "/diff-a/file") + dirA := node.IPFS("files", "stat", "--hash", "/diff-a").Stdout.Trimmed() + + node.IPFS("files", "mkdir", "/diff-b") + node.IPFS("files", "cp", "/ipfs/"+cidB, "/diff-b/file") + dirB := node.IPFS("files", "stat", "--hash", "/diff-b").Stdout.Trimmed() + + // Without --cid-base: CIDs in diff output are CIDv0 + out := node.IPFS("object", "diff", dirA, dirB).Stdout.Trimmed() + require.Contains(t, out, "Qm") + + // With --cid-base: CIDs in diff output should be base16 + out = node.IPFS("object", "diff", cidBaseFlag, dirA, dirB).Stdout.Trimmed() + require.Contains(t, out, cidV1Prefix, "expected base16 CIDs in diff output") + require.NotContains(t, out, "Qm", "should not contain CIDv0 in diff output") + }) +} diff --git a/test/cli/cid_test.go b/test/cli/cid_test.go index 5e44b0db617..5a5b2667e08 100644 --- a/test/cli/cid_test.go +++ b/test/cli/cid_test.go @@ -1,17 +1,23 @@ package cli import ( + "encoding/json" "fmt" "strings" "testing" + cid "github.com/ipfs/go-cid" "github.com/ipfs/kubo/test/cli/harness" + peer 
"github.com/libp2p/go-libp2p/core/peer" + mhash "github.com/multiformats/go-multihash" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestCidCommands(t *testing.T) { t.Parallel() + t.Run("inspect", testCidInspect) t.Run("base32", testCidBase32) t.Run("format", testCidFormat) t.Run("bases", testCidBases) @@ -19,6 +25,166 @@ func TestCidCommands(t *testing.T) { t.Run("hashes", testCidHashes) } +// testCidInspect tests 'ipfs cid inspect' subcommand +func testCidInspect(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + + t.Run("CIDv0", func(t *testing.T) { + res := node.RunIPFS("cid", "inspect", "QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR") + assert.Equal(t, 0, res.ExitCode()) + out := res.Stdout.String() + assert.Contains(t, out, "CID: QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR") + assert.Contains(t, out, "Version: 0") + assert.Contains(t, out, "Multibase: base58btc (implicit)") + assert.Contains(t, out, "Multicodec: dag-pb (0x70, implicit)") + assert.Contains(t, out, "Multihash: sha2-256 (0x12, implicit)") + assert.Contains(t, out, " Length: 32 bytes") + assert.Contains(t, out, " Digest: c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a") + assert.Contains(t, out, "CIDv0: QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR") + assert.Contains(t, out, "CIDv1: bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi") + }) + + t.Run("CIDv1 base32 dag-pb", func(t *testing.T) { + res := node.RunIPFS("cid", "inspect", "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi") + assert.Equal(t, 0, res.ExitCode()) + out := res.Stdout.String() + assert.Contains(t, out, "CID: bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi") + assert.Contains(t, out, "Version: 1") + assert.Contains(t, out, "Multibase: base32 (b)") + assert.Contains(t, out, "Multicodec: dag-pb (0x70)") + assert.Contains(t, out, "Multihash: sha2-256 (0x12)") + assert.Contains(t, out, " Length: 32 bytes") + 
assert.Contains(t, out, " Digest: c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a") + assert.NotContains(t, out, "implicit") + assert.Contains(t, out, "CIDv0: QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR") + assert.Contains(t, out, "CIDv1: bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi") + }) + + t.Run("CIDv1 raw codec", func(t *testing.T) { + res := node.RunIPFS("cid", "inspect", "bafkreigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi") + assert.Equal(t, 0, res.ExitCode()) + out := res.Stdout.String() + assert.Contains(t, out, "CID: bafkreigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi") + assert.Contains(t, out, "Multibase: base32 (b)") + assert.Contains(t, out, "Multicodec: raw (0x55)") + assert.Contains(t, out, "Multihash: sha2-256 (0x12)") + assert.Contains(t, out, " Length: 32 bytes") + assert.Contains(t, out, " Digest: c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a") + assert.Contains(t, out, "CIDv0: not possible, requires dag-pb (0x70), got raw (0x55)") + assert.Contains(t, out, "CIDv1: bafkreigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi") + }) + + t.Run("CIDv1 base36", func(t *testing.T) { + res := node.RunIPFS("cid", "inspect", "k2jmtxw8rjh1z69c6not3wtdxb0u3urbzhyll1t9jg6ox26dhi5sfi1m") + assert.Equal(t, 0, res.ExitCode()) + out := res.Stdout.String() + assert.Contains(t, out, "CID: k2jmtxw8rjh1z69c6not3wtdxb0u3urbzhyll1t9jg6ox26dhi5sfi1m") + assert.Contains(t, out, "Multibase: base36 (k)") + assert.Contains(t, out, "Multicodec: dag-pb (0x70)") + assert.Contains(t, out, " Digest: c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a") + assert.Contains(t, out, "CIDv0: QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR") + assert.Contains(t, out, "CIDv1: bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi") + }) + + t.Run("invalid CID", func(t *testing.T) { + res := node.RunIPFS("cid", "inspect", "garbage") + assert.Equal(t, 1, res.ExitCode()) + assert.Contains(t, 
res.Stderr.String(), "invalid CID") + }) + + t.Run("PeerID as input", func(t *testing.T) { + res := node.RunIPFS("cid", "inspect", "12D3KooWD3eckifWpRn9wQpMG9R9hX3sD158z7EqHWmweQAJU5SA") + assert.Equal(t, 1, res.ExitCode()) + stderr := res.Stderr.String() + assert.Contains(t, stderr, "PeerID") + assert.Contains(t, stderr, "inspect its CID representation instead") + // suggested CID should use base36 (k prefix) + assert.Contains(t, stderr, "\n k") + }) + + t.Run("libp2p-key CID uses base36", func(t *testing.T) { + // Construct a libp2p-key CIDv1 from a known PeerID + pid, err := peer.Decode("12D3KooWD3eckifWpRn9wQpMG9R9hX3sD158z7EqHWmweQAJU5SA") + require.NoError(t, err) + pidCid := peer.ToCid(pid) + cidStr := pidCid.String() + + res := node.RunIPFS("cid", "inspect", cidStr) + assert.Equal(t, 0, res.ExitCode()) + out := res.Stdout.String() + assert.Contains(t, out, "Multicodec: libp2p-key (0x72)") + // CIDv1 should use base36 (k prefix) + assert.Contains(t, out, "CIDv1: k") + }) + + t.Run("identity multihash CID", func(t *testing.T) { + // raw codec + identity multihash: digest is the raw content ("test" = 74657374) + res := node.RunIPFS("cid", "inspect", "bafkqabdumvzxi") + assert.Equal(t, 0, res.ExitCode()) + out := res.Stdout.String() + assert.Contains(t, out, "CID: bafkqabdumvzxi") + assert.Contains(t, out, "Multicodec: raw (0x55)") + assert.Contains(t, out, "Multihash: identity (0x0)") + assert.Contains(t, out, " Length: 4 bytes") + assert.Contains(t, out, " Digest: 74657374") + }) + + t.Run("unknown codec", func(t *testing.T) { + // Construct a CID with unknown codec 0x9999 + mh, err := mhash.Sum([]byte("test"), mhash.SHA2_256, -1) + require.NoError(t, err) + unknownCID := cid.NewCidV1(0x9999, mh) + cidStr := unknownCID.String() + + res := node.RunIPFS("cid", "inspect", cidStr) + assert.Equal(t, 0, res.ExitCode()) + out := res.Stdout.String() + assert.Contains(t, out, "Multicodec: unknown (0x9999)") + assert.Contains(t, out, "not possible, requires dag-pb 
(0x70), got unknown (0x9999)") + }) + + t.Run("JSON output", func(t *testing.T) { + res := node.RunIPFS("cid", "inspect", "--enc=json", "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi") + assert.Equal(t, 0, res.ExitCode()) + + var result map[string]any + err := json.Unmarshal(res.Stdout.Bytes(), &result) + require.NoError(t, err) + + // multibase.prefix should be a string, not a number + mb := result["multibase"].(map[string]any) + assert.IsType(t, "", mb["prefix"]) + assert.Equal(t, "b", mb["prefix"]) + + // multihash.length should be a number (bytes) + mh := result["multihash"].(map[string]any) + assert.Equal(t, float64(32), mh["length"]) + + // cidV0 should be a clean CID string, no explanatory text + cidV0 := result["cidV0"].(string) + assert.True(t, strings.HasPrefix(cidV0, "Qm"), "cidV0 should be a valid CIDv0") + + // cidV1 should be a clean CID string + cidV1 := result["cidV1"].(string) + assert.True(t, strings.HasPrefix(cidV1, "b"), "cidV1 should be base32 encoded") + }) + + t.Run("JSON output with empty CIDv0", func(t *testing.T) { + // raw codec can't be CIDv0 + res := node.RunIPFS("cid", "inspect", "--enc=json", "bafkreigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi") + assert.Equal(t, 0, res.ExitCode()) + + var result map[string]any + err := json.Unmarshal(res.Stdout.Bytes(), &result) + require.NoError(t, err) + + // cidV0 should not be present (omitempty) + _, hasCidV0 := result["cidV0"] + assert.False(t, hasCidV0, "cidV0 should be omitted when not possible") + }) +} + // testCidBase32 tests 'ipfs cid base32' subcommand // Includes regression tests for https://github.com/ipfs/kubo/issues/9007 func testCidBase32(t *testing.T) { diff --git a/test/cli/dag_test.go b/test/cli/dag_test.go index 38457318a0c..7e28435f364 100644 --- a/test/cli/dag_test.go +++ b/test/cli/dag_test.go @@ -109,6 +109,45 @@ func TestDag(t *testing.T) { }) } +func TestDagImportCARv2(t *testing.T) { + t.Parallel() + // Regression test for 
https://github.com/ipfs/kubo/issues/9361 + // CARv2 import fails with "operation not supported" when using the HTTP API + // because the multipart reader doesn't support seeking, but the boxo + // ReaderFile falsely advertises io.Seeker compliance. + + carv2Fixture := "./fixtures/TestDagStatCARv2.car" + + t.Run("CARv2 import via HTTP API (online)", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + r, err := os.Open(carv2Fixture) + require.NoError(t, err) + defer r.Close() + + // Use Runner.Run (not MustRun) so the test captures errors + // instead of panicking -- this lets us assert on the result. + res := node.Runner.Run(harness.RunRequest{ + Path: node.IPFSBin, + Args: []string{"dag", "import", "--pin-roots=false"}, + CmdOpts: []harness.CmdOpt{ + harness.RunWithStdin(r), + }, + }) + require.Equal(t, 0, res.ExitCode(), "CARv2 import should succeed over HTTP API, stderr: %s", res.Stderr.String()) + + // Verify the imported blocks are accessible + stat := node.RunIPFS("dag", "stat", "--progress=false", "--enc=json", fixtureCid) + var data Data + err = json.Unmarshal(stat.Stdout.Bytes(), &data) + require.NoError(t, err) + // root + node1 + node2 + shared child = 4 unique blocks + require.Equal(t, 4, data.UniqueBlocks) + }) +} + func TestDagImportFastProvide(t *testing.T) { t.Parallel() diff --git a/test/cli/delegated_routing_v1_http_client_test.go b/test/cli/delegated_routing_v1_http_client_test.go index 44e62246bef..a23ab32db58 100644 --- a/test/cli/delegated_routing_v1_http_client_test.go +++ b/test/cli/delegated_routing_v1_http_client_test.go @@ -1,14 +1,20 @@ package cli import ( + "encoding/json" + "io" "net/http" "net/http/httptest" + "strings" + "sync" "testing" + "time" "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/test/cli/harness" . 
"github.com/ipfs/kubo/test/cli/testutils" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestHTTPDelegatedRouting(t *testing.T) { @@ -164,3 +170,108 @@ func TestHTTPDelegatedRouting(t *testing.T) { assert.Contains(t, resp.Body, "routing_http_client_length_count") }) } + +// TestHTTPDelegatedRoutingProviderAddrs verifies that provider records sent to +// HTTP routers contain the expected addresses based on Addresses configuration. +// See https://github.com/ipfs/kubo/issues/11213 +func TestHTTPDelegatedRoutingProviderAddrs(t *testing.T) { + t.Parallel() + + // captureProviderAddrs returns a mock server and a function to retrieve captured addresses. + captureProviderAddrs := func(t *testing.T) (*httptest.Server, func() []string) { + t.Helper() + var mu sync.Mutex + var capturedAddrs []string + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if (r.Method == http.MethodPut || r.Method == http.MethodPost) && + strings.HasPrefix(r.URL.Path, "/routing/v1/providers") { + body, _ := io.ReadAll(r.Body) + var envelope struct { + Providers []struct { + Payload json.RawMessage `json:"Payload"` + } `json:"Providers"` + } + if json.Unmarshal(body, &envelope) == nil { + for _, prov := range envelope.Providers { + var payload struct { + Addrs []string `json:"Addrs"` + } + if json.Unmarshal(prov.Payload, &payload) == nil && len(payload.Addrs) > 0 { + mu.Lock() + capturedAddrs = payload.Addrs + mu.Unlock() + } + } + } + w.WriteHeader(http.StatusOK) + return + } + if strings.HasPrefix(r.URL.Path, "/routing/v1/") { + w.WriteHeader(http.StatusOK) + return + } + w.WriteHeader(http.StatusNotFound) + })) + t.Cleanup(srv.Close) + return srv, func() []string { + mu.Lock() + defer mu.Unlock() + return capturedAddrs + } + } + + customRoutingConf := func(endpoint string) map[string]any { + return map[string]any{ + "Type": "custom", + "Methods": map[string]any{ + "provide": map[string]any{"RouterName": 
"TestRouter"}, + "find-providers": map[string]any{"RouterName": "TestRouter"}, + "find-peers": map[string]any{"RouterName": "TestRouter"}, + "get-ipns": map[string]any{"RouterName": "TestRouter"}, + "put-ipns": map[string]any{"RouterName": "TestRouter"}, + }, + "Routers": map[string]any{ + "TestRouter": map[string]any{ + "Type": "http", + "Parameters": map[string]any{"Endpoint": endpoint}, + }, + }, + } + } + + t.Run("provider records respect user-provided Addresses.Announce override", func(t *testing.T) { + t.Parallel() + srv, getAddrs := captureProviderAddrs(t) + + node := harness.NewT(t).NewNode().Init() + node.SetIPFSConfig("Addresses.Announce", []string{"/ip4/1.2.3.4/tcp/4001"}) + node.SetIPFSConfig("Routing", customRoutingConf(srv.URL)) + node.StartDaemon() + defer node.StopDaemon() + + cidStr := node.IPFSAddStr(time.Now().String()) + node.IPFS("routing", "provide", cidStr) + + addrs := getAddrs() + require.NotEmpty(t, addrs, "provider record should contain addresses") + assert.Equal(t, []string{"/ip4/1.2.3.4/tcp/4001"}, addrs) + }) + + t.Run("provider records respect user-provided Addresses.AppendAnnounce", func(t *testing.T) { + t.Parallel() + srv, getAddrs := captureProviderAddrs(t) + + node := harness.NewT(t).NewNode().Init() + node.SetIPFSConfig("Addresses.AppendAnnounce", []string{"/ip4/5.6.7.8/tcp/4001"}) + node.SetIPFSConfig("Routing", customRoutingConf(srv.URL)) + node.StartDaemon() + defer node.StopDaemon() + + cidStr := node.IPFSAddStr(time.Now().String()) + node.IPFS("routing", "provide", cidStr) + + addrs := getAddrs() + require.NotEmpty(t, addrs, "provider record should contain addresses") + assert.Contains(t, addrs, "/ip4/5.6.7.8/tcp/4001", "AppendAnnounce address should be present") + }) +} diff --git a/test/cli/delegated_routing_v1_http_server_test.go b/test/cli/delegated_routing_v1_http_server_test.go index 503dba39b79..e6f5867fc49 100644 --- a/test/cli/delegated_routing_v1_http_server_test.go +++ 
b/test/cli/delegated_routing_v1_http_server_test.go @@ -252,7 +252,9 @@ func TestRoutingV1Server(t *testing.T) { // Wait for WAN DHT routing table to be populated. // The server has a 30-second routing timeout, so we use 60 seconds // per request to allow for network latency while preventing hangs. - // Total wait time is 2 minutes (locally passes in under 1 minute). + // Total wait time is 5 minutes to accommodate slow CI DHT bootstrapping. + // Passing runs finish in 8-48s; failures are total bootstrap failures, + // not slow convergence, so extra headroom doesn't waste time on success. var records []*types.PeerRecord require.EventuallyWithT(t, func(ct *assert.CollectT) { ctx, cancel := context.WithTimeout(t.Context(), 60*time.Second) @@ -263,7 +265,7 @@ func TestRoutingV1Server(t *testing.T) { } records, err = iter.ReadAllResults(resultsIter) assert.NoError(ct, err) - }, 2*time.Minute, 5*time.Second) + }, 5*time.Minute, 5*time.Second) // Verify we got some peers back from WAN DHT require.NotEmpty(t, records, "should return peers close to own peerid") diff --git a/test/cli/diag_datastore_test.go b/test/cli/diag_datastore_test.go index 2a69f60cc57..d1b429d3376 100644 --- a/test/cli/diag_datastore_test.go +++ b/test/cli/diag_datastore_test.go @@ -2,6 +2,8 @@ package cli import ( "encoding/json" + "os" + "path/filepath" "testing" "github.com/ipfs/kubo/test/cli/harness" @@ -130,6 +132,18 @@ func TestDiagDatastore(t *testing.T) { assert.Contains(t, res.Stderr.String(), "key not found") }) + t.Run("diag datastore put and get roundtrip", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + + node.DatastorePut("/test/roundtrip", "hello world") + assert.True(t, node.DatastoreHasKey("/test/roundtrip")) + assert.Equal(t, []byte("hello world"), node.DatastoreGet("/test/roundtrip")) + + count := node.DatastoreCount("/test/") + assert.Equal(t, int64(1), count) + }) + t.Run("diag datastore commands require daemon to be stopped", func(t *testing.T) { 
t.Parallel() node := harness.NewT(t).NewNode().Init().StartDaemon() @@ -144,4 +158,69 @@ func TestDiagDatastore(t *testing.T) { assert.Error(t, res.Err, "count should fail when daemon is running") assert.Contains(t, res.Stderr.String(), "ipfs daemon is running") }) + + t.Run("provider keystore datastores are visible in unified view", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + node.SetIPFSConfig("Provide.DHT.SweepEnabled", true) + node.SetIPFSConfig("Provide.Enabled", true) + + // Start daemon to create the provider-keystore datastores, then add data + node.StartDaemon() + cid := node.IPFSAddStr("data for provider keystore test") + node.IPFS("pin", "add", cid) + node.StopDaemon() + + // Verify the provider-keystore directory was created + keystorePath := filepath.Join(node.Dir, "provider-keystore") + _, err := os.Stat(keystorePath) + require.NoError(t, err, "provider-keystore directory should exist after sweep-enabled daemon ran") + + // Count entries in each keystore namespace via the unified view + for _, prefix := range []string{"/provider/keystore/0/", "/provider/keystore/1/"} { + res := node.IPFS("diag", "datastore", "count", prefix) + assert.NoError(t, res.Err) + t.Logf("count %s: %s", prefix, res.Stdout.String()) + } + + // The total count under /provider/keystore/ should include entries + // from both keystore instances (0 and 1) + count := node.DatastoreCount("/provider/keystore/") + t.Logf("total /provider/keystore/ entries: %d", count) + assert.Greater(t, count, int64(0), "should have provider keystore entries") + }) + + t.Run("provider keystore count JSON output", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + node.SetIPFSConfig("Provide.DHT.SweepEnabled", true) + node.SetIPFSConfig("Provide.Enabled", true) + + node.StartDaemon() + node.StopDaemon() + + res := node.IPFS("diag", "datastore", "count", "/provider/keystore/0/", "--enc=json") + assert.NoError(t, res.Err) + + var result 
struct { + Prefix string `json:"prefix"` + Count int64 `json:"count"` + } + err := json.Unmarshal(res.Stdout.Bytes(), &result) + require.NoError(t, err) + assert.Equal(t, "/provider/keystore/0/", result.Prefix) + assert.GreaterOrEqual(t, result.Count, int64(0), "count should be non-negative") + }) + + t.Run("works without provider keystore", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + + // No sweep enabled, no provider-keystore dirs — should still work fine + count := node.DatastoreCount("/provider/keystore/0/") + assert.Zero(t, count) + + count = node.DatastoreCount("/") + assert.Greater(t, count, int64(0)) + }) } diff --git a/test/cli/files_test.go b/test/cli/files_test.go index 69dead728a8..dbf30b1b750 100644 --- a/test/cli/files_test.go +++ b/test/cli/files_test.go @@ -848,6 +848,73 @@ func TestFilesMFSImportConfig(t *testing.T) { require.Equal(t, ft.THAMTShard, fsType, "expected HAMT directory after exceeding size threshold") }) + // Regression tests for https://github.com/ipfs/boxo/pull/1125 + // CidBuilder (CID version + hash function) must be preserved across + // file mutations, directory creation, and daemon restarts. We use + // CIDv1 + sha2-512 so assertions are meaningful even if CIDv1 or a + // different hash becomes the default in the future. 
+ + t.Run("CidBuilder preserved across file mutation and restart", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + node.UpdateConfig(func(cfg *config.Config) { + cfg.Import.CidVersion = *config.NewOptionalInteger(1) + cfg.Import.HashFunction = *config.NewOptionalString("sha2-512") + }) + node.StartDaemon() + + requireCidBuilder := func(mfsPath, context string) { + t.Helper() + cidStr := node.IPFS("files", "stat", "--hash", mfsPath).Stdout.Trimmed() + prefix := node.IPFS("cid", "format", "-f", "%V-%h", cidStr).Stdout.Trimmed() + require.Equal(t, "1-sha2-512", prefix, "%s: expected CIDv1+sha2-512 for %s, got %s (cid: %s)", context, mfsPath, prefix, cidStr) + } + + // 1. files write --create: new file + tempFile := filepath.Join(node.Dir, "test.txt") + require.NoError(t, os.WriteFile(tempFile, []byte("hello world"), 0644)) + node.IPFS("files", "write", "--create", "/test.txt", tempFile) + requireCidBuilder("/test.txt", "initial write") + + // 2. files write --offset: mutate existing file (setNodeData) + cidBefore := node.IPFS("files", "stat", "--hash", "/test.txt").Stdout.Trimmed() + patch := filepath.Join(node.Dir, "patch.txt") + require.NoError(t, os.WriteFile(patch, []byte("PATCHED"), 0644)) + node.IPFS("files", "write", "--offset", "0", "/test.txt", patch) + requireCidBuilder("/test.txt", "after offset write") + cidAfter := node.IPFS("files", "stat", "--hash", "/test.txt").Stdout.Trimmed() + require.NotEqual(t, cidBefore, cidAfter, "CID should change after mutation") + + // 3. files mkdir -p: all intermediate directories + node.IPFS("files", "mkdir", "-p", "/a/b/c") + for _, dir := range []string{"/a", "/a/b", "/a/b/c"} { + requireCidBuilder(dir, "mkdir -p") + } + + // 4. files write --create inside a subdirectory + node.IPFS("files", "write", "--create", "/a/b/nested.txt", tempFile) + requireCidBuilder("/a/b/nested.txt", "write in subdir") + + // 5. root directory + requireCidBuilder("/", "root before restart") + + // 6. 
daemon restart: NewRoot must preserve CidBuilder + node.StopDaemon() + node.StartDaemon() + defer node.StopDaemon() + + requireCidBuilder("/", "root after restart") + requireCidBuilder("/test.txt", "file after restart") + requireCidBuilder("/a/b/c", "dir after restart") + + // 7. new entries created after restart + require.NoError(t, os.WriteFile(tempFile, []byte("post-restart"), 0644)) + node.IPFS("files", "write", "--create", "/post-restart.txt", tempFile) + node.IPFS("files", "mkdir", "/post-restart-dir") + requireCidBuilder("/post-restart.txt", "new file after restart") + requireCidBuilder("/post-restart-dir", "new dir after restart") + }) + t.Run("config change takes effect after daemon restart", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init() diff --git a/test/cli/fixtures/TestDagStatCARv2.car b/test/cli/fixtures/TestDagStatCARv2.car new file mode 100644 index 00000000000..f08c0216fcd Binary files /dev/null and b/test/cli/fixtures/TestDagStatCARv2.car differ diff --git a/test/cli/fuse/fuse_test.go b/test/cli/fuse/fuse_test.go new file mode 100644 index 00000000000..fb6915f85be --- /dev/null +++ b/test/cli/fuse/fuse_test.go @@ -0,0 +1,502 @@ +//go:build (linux || darwin || freebsd) && !nofuse + +// Package fuse contains end-to-end FUSE integration tests that exercise +// mount/unmount and filesystem operations through a real ipfs daemon. +// +// These tests complement the unit tests in fuse/readonly/, fuse/ipns/, +// and fuse/mfs/ which test the FUSE filesystem implementations directly +// (without a daemon) via fusetest.TestMount. +// +// All tests here are gated by testutils.RequiresFUSE (TEST_FUSE env var). +// CI runs them via `make test_fuse_cli` inside the fuse-tests job. 
+package fuse + +import ( + "bytes" + "crypto/rand" + "os" + "os/exec" + "path/filepath" + "runtime" + "sort" + "strings" + "syscall" + "testing" + + "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/test/cli/harness" + "github.com/ipfs/kubo/test/cli/testutils" + "github.com/stretchr/testify/require" +) + +func TestFUSE(t *testing.T) { + testutils.RequiresFUSE(t) + t.Parallel() + + t.Run("mount and unmount work correctly", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init() + node.StartDaemon() + + ipfsMount, ipnsMount, mfsMount := mountAll(t, node) + + // Test basic MFS functionality via FUSE mount + testFile := filepath.Join(mfsMount, "testfile") + testContent := "hello fuse world" + + err := os.WriteFile(testFile, []byte(testContent), 0644) + require.NoError(t, err) + + // Verify file appears in MFS via IPFS commands + result := node.IPFS("files", "ls", "/") + require.Contains(t, result.Stdout.String(), "testfile") + + // Read content back via MFS FUSE mount + readContent, err := os.ReadFile(testFile) + require.NoError(t, err) + require.Equal(t, testContent, string(readContent)) + + // Get the CID of the MFS file + result = node.IPFS("files", "stat", "/testfile", "--format=") + fileCID := strings.TrimSpace(result.Stdout.String()) + require.NotEmpty(t, fileCID, "should have a CID for the MFS file") + + // Read the same content via IPFS FUSE mount using the CID + ipfsFile := filepath.Join(ipfsMount, fileCID) + ipfsContent, err := os.ReadFile(ipfsFile) + require.NoError(t, err) + require.Equal(t, testContent, string(ipfsContent), "content should match between MFS and IPFS mounts") + + // Verify both FUSE mounts return identical data + require.Equal(t, readContent, ipfsContent, "MFS and IPFS FUSE mounts should return identical data") + + // Test that mount directories cannot be removed while mounted + err = os.Remove(ipfsMount) + require.Error(t, err, "should not be able to remove mounted directory") + + // Stop daemon, which should 
trigger automatic unmount + node.StopDaemon() + + // Verify directories can now be removed (indicating successful unmount) + require.NoError(t, os.Remove(ipfsMount)) + require.NoError(t, os.Remove(ipnsMount)) + require.NoError(t, os.Remove(mfsMount)) + }) + + t.Run("explicit unmount works", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init() + node.StartDaemon() + + ipfsMount, ipnsMount, mfsMount := mountAll(t, node) + + doUnmount(t, ipfsMount, true) + doUnmount(t, ipnsMount, true) + doUnmount(t, mfsMount, true) + + // Verify directories can be removed after explicit unmount + require.NoError(t, os.Remove(ipfsMount)) + require.NoError(t, os.Remove(ipnsMount)) + require.NoError(t, os.Remove(mfsMount)) + + node.StopDaemon() + }) + + t.Run("mount fails when dirs missing", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init() + node.StartDaemon() + + res := node.RunIPFS("mount", "-f=not_ipfs", "-n=not_ipns", "-m=not_mfs") + require.Error(t, res.Err) + require.Empty(t, res.Stdout.String()) + stderr := res.Stderr.String() + require.True(t, + strings.Contains(stderr, "not_ipfs") || + strings.Contains(stderr, "not_ipns") || + strings.Contains(stderr, "not_mfs"), + "error should mention missing mount dir, got: %s", stderr) + + node.StopDaemon() + }) + + t.Run("IPNS local symlink", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init() + node.StartDaemon() + + _, ipnsMount, _ := mountAll(t, node) + + target, err := os.Readlink(filepath.Join(ipnsMount, "local")) + require.NoError(t, err) + require.Equal(t, node.PeerID().String(), filepath.Base(target)) + + node.StopDaemon() + }) + + t.Run("IPNS name resolution via NS map", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init() + + // Add content offline (before daemon starts) + expectedFile := filepath.Join(node.Dir, "expected") + require.NoError(t, os.WriteFile(expectedFile, []byte("ipfs"), 0644)) + wrappedCID := 
node.IPFS("add", "--cid-version", "1", "-Q", "-w", expectedFile).Stdout.Trimmed() + + // Set IPFS_NS_MAP so the daemon resolves welcome.example.com + node.Runner.Env["IPFS_NS_MAP"] = "welcome.example.com:/ipfs/" + wrappedCID + + node.StartDaemon() + _, ipnsMount, _ := mountAll(t, node) + + // Read the file through IPNS FUSE mount using the DNS name + content, err := os.ReadFile(filepath.Join(ipnsMount, "welcome.example.com", "expected")) + require.NoError(t, err) + require.Equal(t, "ipfs", string(content)) + + node.StopDaemon() + }) + + t.Run("MFS file and dir creation", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init() + node.StartDaemon() + + _, _, mfsMount := mountAll(t, node) + + // Create file via FUSE + require.NoError(t, os.WriteFile(filepath.Join(mfsMount, "testfile"), []byte("content"), 0644)) + result := node.IPFS("files", "ls", "/") + require.Contains(t, result.Stdout.String(), "testfile") + + // Create dir via FUSE + require.NoError(t, os.Mkdir(filepath.Join(mfsMount, "testdir"), 0755)) + result = node.IPFS("files", "ls", "/") + require.Contains(t, result.Stdout.String(), "testdir") + + node.StopDaemon() + }) + + t.Run("MFS xattr", func(t *testing.T) { + t.Parallel() + if runtime.GOOS != "linux" { + t.Skip("xattr requires Linux") + } + + node := harness.NewT(t).NewNode().Init() + node.StartDaemon() + + _, _, mfsMount := mountAll(t, node) + + testFile := filepath.Join(mfsMount, "testfile") + require.NoError(t, os.WriteFile(testFile, []byte("content"), 0644)) + + cid, err := getXattr(testFile, "ipfs.cid") + require.NoError(t, err) + require.NotEmpty(t, cid) + + node.StopDaemon() + }) + + t.Run("files write then read via FUSE", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init() + node.StartDaemon() + + _, _, mfsMount := mountAll(t, node) + + // Write via ipfs files write -e, read back via FUSE + node.PipeStrToIPFS("content3", "files", "write", "-e", "/testfile3") + + got, err := 
os.ReadFile(filepath.Join(mfsMount, "testfile3")) + require.NoError(t, err) + require.Equal(t, "content3", string(got)) + + node.StopDaemon() + }) + + t.Run("add --to-files then read via FUSE", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init() + node.StartDaemon() + + _, _, mfsMount := mountAll(t, node) + + // Create a temp file to add + tmpFile := filepath.Join(node.Dir, "testfile2") + require.NoError(t, os.WriteFile(tmpFile, []byte("content"), 0644)) + + node.IPFS("add", "--to-files", "/testfile2", tmpFile) + + got, err := os.ReadFile(filepath.Join(mfsMount, "testfile2")) + require.NoError(t, err) + require.Equal(t, "content", string(got)) + + node.StopDaemon() + }) + + t.Run("file removal via FUSE", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init() + node.StartDaemon() + + _, _, mfsMount := mountAll(t, node) + + testFile := filepath.Join(mfsMount, "testfile") + require.NoError(t, os.WriteFile(testFile, []byte("content"), 0644)) + + result := node.IPFS("files", "ls", "/") + require.Contains(t, result.Stdout.String(), "testfile") + + require.NoError(t, os.Remove(testFile)) + + result = node.IPFS("files", "ls", "/") + require.NotContains(t, result.Stdout.String(), "testfile") + + node.StopDaemon() + }) + + t.Run("nested dirs via FUSE", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init() + node.StartDaemon() + + _, _, mfsMount := mountAll(t, node) + + nested := filepath.Join(mfsMount, "foo", "bar", "baz", "qux") + require.NoError(t, os.MkdirAll(nested, 0755)) + require.NoError(t, os.WriteFile(filepath.Join(nested, "quux"), []byte("content"), 0644)) + + result := node.IPFS("files", "stat", "/foo/bar/baz/qux/quux") + require.NoError(t, result.Err) + + node.StopDaemon() + }) + + t.Run("publish blocked while IPNS mounted", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init() + node.StartDaemon() + + // Add content and publish before mount + hash := 
node.PipeStrToIPFS("hello warld", "add", "-Q", "-w", "--stdin-name", "file").Stdout.Trimmed() + node.IPFS("name", "publish", hash) + + // Mount all + _, ipnsMount, _ := mountAll(t, node) + + // Publish should fail while IPNS is mounted + res := node.RunIPFS("name", "publish", hash) + require.Error(t, res.Err) + require.Contains(t, res.Stderr.String(), "cannot manually publish while IPNS is mounted") + + // Unmount IPNS out-of-band + doUnmount(t, ipnsMount, true) + + // Publish should work again + node.IPFS("name", "publish", hash) + + node.StopDaemon() + }) + + // Exercises both ftruncate(fd, size) and truncate(path, size). + // ftruncate uses the open file handle in Setattr; truncate opens + // a temporary write descriptor. Both must leave the file with + // correct content visible via the FUSE mount and via ipfs files. + t.Run("truncation via FUSE", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init() + node.StartDaemon() + + _, _, mfsMount := mountAll(t, node) + + original := make([]byte, 2000) + _, err := rand.Read(original) + require.NoError(t, err) + + path := filepath.Join(mfsMount, "trunctest") + require.NoError(t, os.WriteFile(path, original, 0644)) + + // ftruncate(fd, 500): open, truncate via fd, close. + t.Run("ftruncate via fd", func(t *testing.T) { + f, err := os.OpenFile(path, os.O_WRONLY, 0644) + require.NoError(t, err) + require.NoError(t, f.Truncate(500)) + require.NoError(t, f.Close()) + + info, err := os.Stat(path) + require.NoError(t, err) + require.Equal(t, int64(500), info.Size()) + + got, err := os.ReadFile(path) + require.NoError(t, err) + require.True(t, bytes.Equal(original[:500], got), + "ftruncated content should match first 500 bytes of original") + + // Verify via ipfs files stat + stat := node.IPFS("files", "stat", "/trunctest", "--format=") + require.Equal(t, "500", strings.TrimSpace(stat.Stdout.String())) + }) + + // truncate(path, 200): no open fd, Setattr opens a temporary + // write descriptor. 
+ t.Run("truncate via path", func(t *testing.T) { + require.NoError(t, syscall.Truncate(path, 200)) + + info, err := os.Stat(path) + require.NoError(t, err) + require.Equal(t, int64(200), info.Size()) + + got, err := os.ReadFile(path) + require.NoError(t, err) + require.True(t, bytes.Equal(original[:200], got), + "path-truncated content should match first 200 bytes of original") + + stat := node.IPFS("files", "stat", "/trunctest", "--format=") + require.Equal(t, "200", strings.TrimSpace(stat.Stdout.String())) + }) + + // Truncate to zero and rewrite: the common open(O_TRUNC) pattern. + t.Run("truncate to zero and rewrite", func(t *testing.T) { + newContent := []byte("brand new content") + f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC, 0644) + require.NoError(t, err) + _, err = f.Write(newContent) + require.NoError(t, err) + require.NoError(t, f.Close()) + + got, err := os.ReadFile(path) + require.NoError(t, err) + require.Equal(t, newContent, got) + }) + + node.StopDaemon() + }) + + t.Run("sharded directory read via FUSE", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init() + + // Force sharding with 1B threshold + node.UpdateConfig(func(cfg *config.Config) { + cfg.Import.UnixFSHAMTDirectorySizeThreshold = *config.NewOptionalBytes("1B") + }) + + node.StartDaemon() + ipfsMount, _, _ := mountAll(t, node) + + // Create test data directory + testdataDir := filepath.Join(node.Dir, "testdata") + require.NoError(t, os.MkdirAll(filepath.Join(testdataDir, "subdir"), 0755)) + require.NoError(t, os.WriteFile(filepath.Join(testdataDir, "a"), []byte("a\n"), 0644)) + require.NoError(t, os.WriteFile(filepath.Join(testdataDir, "subdir", "b"), []byte("b\n"), 0644)) + + // Add sharded directory + hash := node.IPFS("add", "-r", "-Q", testdataDir).Stdout.Trimmed() + + // Read files via FUSE /ipfs mount + contentA, err := os.ReadFile(filepath.Join(ipfsMount, hash, "a")) + require.NoError(t, err) + require.Equal(t, "a\n", string(contentA)) + + contentB, 
err := os.ReadFile(filepath.Join(ipfsMount, hash, "subdir", "b")) + require.NoError(t, err) + require.Equal(t, "b\n", string(contentB)) + + // List directories via FUSE + entries, err := os.ReadDir(filepath.Join(ipfsMount, hash)) + require.NoError(t, err) + names := make([]string, len(entries)) + for i, e := range entries { + names[i] = e.Name() + } + sort.Strings(names) + require.Equal(t, []string{"a", "subdir"}, names) + + subEntries, err := os.ReadDir(filepath.Join(ipfsMount, hash, "subdir")) + require.NoError(t, err) + require.Len(t, subEntries, 1) + require.Equal(t, "b", subEntries[0].Name()) + + node.StopDaemon() + }) +} + +// mountAll creates mount directories and mounts IPFS, IPNS, and MFS. +func mountAll(t *testing.T, node *harness.Node) (ipfsMount, ipnsMount, mfsMount string) { + t.Helper() + ipfsMount = filepath.Join(node.Dir, "ipfs") + ipnsMount = filepath.Join(node.Dir, "ipns") + mfsMount = filepath.Join(node.Dir, "mfs") + + require.NoError(t, os.MkdirAll(ipfsMount, 0755)) + require.NoError(t, os.MkdirAll(ipnsMount, 0755)) + require.NoError(t, os.MkdirAll(mfsMount, 0755)) + + // Lazy-unmount any stale mounts from a previous crashed run so + // the mountpoint is free. Non-fatal: the dir may not be mounted. + lazyUnmount(ipfsMount) + lazyUnmount(ipnsMount) + lazyUnmount(mfsMount) + + result := node.IPFS("mount", "-f", ipfsMount, "-n", ipnsMount, "-m", mfsMount) + + // Extra space after "MFS" matches the column-aligned output produced + // by MountCmd in core/commands/mount_unix.go. + expectedOutput := "IPFS mounted at: " + ipfsMount + "\n" + + "IPNS mounted at: " + ipnsMount + "\n" + + "MFS mounted at: " + mfsMount + "\n" + require.Equal(t, expectedOutput, result.Stdout.String()) + + return +} + +// doUnmount performs platform-specific unmount, similar to sharness do_umount. +// If failOnError is true, unmount errors cause test failure; otherwise errors are ignored. 
+func doUnmount(t *testing.T, mountPoint string, failOnError bool) { + t.Helper() + var cmd *exec.Cmd + switch runtime.GOOS { + case "linux": + if _, err := exec.LookPath("fusermount3"); err == nil { + cmd = exec.Command("fusermount3", "-u", mountPoint) + } else { + cmd = exec.Command("fusermount", "-u", mountPoint) + } + default: + cmd = exec.Command("umount", mountPoint) + } + + err := cmd.Run() + if err != nil && failOnError { + t.Fatalf("failed to unmount %s: %v", mountPoint, err) + } +} + +// lazyUnmount detaches a mount point without waiting for open files +// to close. Used to clean up stale mounts from crashed test runs. +func lazyUnmount(mountPoint string) { + switch runtime.GOOS { + case "linux": + if _, err := exec.LookPath("fusermount3"); err == nil { + _ = exec.Command("fusermount3", "-uz", mountPoint).Run() + } else { + _ = exec.Command("fusermount", "-uz", mountPoint).Run() + } + default: + _ = exec.Command("umount", "-l", mountPoint).Run() + } +} diff --git a/test/cli/fuse/realworld_test.go b/test/cli/fuse/realworld_test.go new file mode 100644 index 00000000000..af2e0d1249c --- /dev/null +++ b/test/cli/fuse/realworld_test.go @@ -0,0 +1,569 @@ +//go:build (linux || darwin || freebsd) && !nofuse + +// End-to-end FUSE coverage with real POSIX tools. +// +// TestFUSERealWorld spins up one ipfs daemon, mounts /ipfs, /ipns, and +// /mfs, and exercises the writable mount through the actual binaries +// users invoke (cat, ls, cp, mv, rm, ln, find, dd, sha256sum, tar, +// rsync, vim, sh, wc). Each subtest verifies the result both via the +// FUSE filesystem and via the daemon's `ipfs files` view. +// +// All external tools are required: a missing binary fails the test +// instead of skipping, so a CI image change cannot silently turn this +// suite green. The whole-suite TEST_FUSE gate is the only place a +// developer is allowed to skip. 
+// +// Synthetic file payloads default to 1 MiB + 1 byte so multi-chunk +// read/write paths and chunk-boundary off-by-ones are exercised. + +package fuse + +import ( + "bytes" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "testing" + "time" + + "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/test/cli/harness" + "github.com/ipfs/kubo/test/cli/testutils" + "github.com/stretchr/testify/require" +) + +// payloadSize is the default test payload size: 1 MiB + 1 byte. +// Forces multi-chunk DAG construction so single-chunk fast paths +// cannot mask cross-block bugs. +const payloadSize = 1024*1024 + 1 + +func TestFUSERealWorld(t *testing.T) { + testutils.RequiresFUSE(t) + + node := harness.NewT(t).NewNode().Init() + // StoreMtime/StoreMode on so rsync -a, tar -p, vim's chmod, and + // any other tool that round-trips POSIX metadata see consistent + // behaviour. The flags only affect the writable mounts. + node.UpdateConfig(func(cfg *config.Config) { + cfg.Mounts.StoreMtime = config.True + cfg.Mounts.StoreMode = config.True + }) + node.StartDaemon() + defer node.StopDaemon() + + _, _, mfsMount := mountAll(t, node) + + // requireTool fails the current subtest if bin is not in PATH. + // External tools are part of the test contract: a missing binary + // is a hidden coverage gap and we want a loud failure. + requireTool := func(t *testing.T, bins ...string) { + t.Helper() + for _, bin := range bins { + if _, err := exec.LookPath(bin); err != nil { + t.Fatalf("%s not in PATH; required for end-to-end FUSE tests", bin) + } + } + } + + // workdir creates a unique subdirectory under the mount for the + // current subtest. Subtests share one daemon and one mount; using + // disjoint subdirectories keeps them from colliding. 
+ workdir := func(t *testing.T, name string) string { + t.Helper() + d := filepath.Join(mfsMount, name) + require.NoError(t, os.Mkdir(d, 0o755)) + return d + } + + // runCmd runs an external binary and fails the test on error, + // printing both stdout and stderr in the failure message. + // + // LC_ALL=C forces the C locale so any locale-sensitive output + // (date formats in `ls -l`, decimal separators in `wc` output on + // some locales, localized error messages, collation order from + // `find` and `ls`) is deterministic regardless of how the runner + // is configured. Without this the same test could pass on a US + // runner and fail on one with LC_ALL=de_DE.UTF-8. + runCmd := func(t *testing.T, name string, args ...string) string { + t.Helper() + cmd := exec.Command(name, args...) + cmd.Env = append(os.Environ(), "LC_ALL=C") + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + t.Fatalf("%s %v failed: %v\nstdout: %s\nstderr: %s", + name, args, err, stdout.String(), stderr.String()) + } + return stdout.String() + } + + // randBytes returns n cryptographically random bytes. + randBytes := func(t *testing.T, n int) []byte { + t.Helper() + b := make([]byte, n) + _, err := rand.Read(b) + require.NoError(t, err) + return b + } + + // ----- Shell and core POSIX ----- + + t.Run("echo_redirect_and_cat", func(t *testing.T) { + requireTool(t, "sh", "cat") + dir := workdir(t, "echo_redirect_and_cat") + path := filepath.Join(dir, "greeting") + + runCmd(t, "sh", "-c", "echo 'hello fuse' > "+path) + + got := runCmd(t, "cat", path) + require.Equal(t, "hello fuse\n", got, "cat output via FUSE") + + // Cross-verify via daemon's MFS view (bypasses FUSE). 
+ ipfsView := node.IPFS("files", "read", "/echo_redirect_and_cat/greeting").Stdout.String() + require.Equal(t, "hello fuse\n", ipfsView, "ipfs files read view") + }) + + t.Run("seq_pipe_to_file_and_wc", func(t *testing.T) { + requireTool(t, "sh", "seq", "wc") + dir := workdir(t, "seq_pipe_to_file_and_wc") + path := filepath.Join(dir, "lines") + + // 200000 lines: about 1.3 MB of text, comfortably more than + // one UnixFS chunk under the default chunker. + runCmd(t, "sh", "-c", "seq 1 200000 > "+path) + + lineCount := strings.Fields(runCmd(t, "wc", "-l", path))[0] + require.Equal(t, "200000", lineCount) + + // File size should match: digits + newline per line. + // sum_{i=1..9} i*9*1 + sum_{i=10..99} i*90*2 + ... easier to + // just stat the file and compare against wc -c. + byteCount := strings.Fields(runCmd(t, "wc", "-c", path))[0] + info, err := os.Stat(path) + require.NoError(t, err) + require.Equal(t, strconv.FormatInt(info.Size(), 10), byteCount, + "wc -c and stat agree on the multi-chunk file size") + require.Greater(t, info.Size(), int64(payloadSize), + "file should be larger than one chunk") + }) + + t.Run("ls_l_shows_mode_and_size", func(t *testing.T) { + requireTool(t, "ls") + dir := workdir(t, "ls_l_shows_mode_and_size") + path := filepath.Join(dir, "file") + + data := randBytes(t, payloadSize) + require.NoError(t, os.WriteFile(path, data, 0o644)) + + // `ls -l` line layout: + out := runCmd(t, "ls", "-l", path) + fields := strings.Fields(out) + require.GreaterOrEqual(t, len(fields), 8, "ls -l output: %q", out) + + require.True(t, strings.HasPrefix(fields[0], "-rw-r--r--"), + "mode field %q should be -rw-r--r--", fields[0]) + require.Equal(t, strconv.Itoa(payloadSize), fields[4], + "size field should match payload size") + }) + + t.Run("stat_reports_default_mode", func(t *testing.T) { + requireTool(t, "stat") + dir := workdir(t, "stat_reports_default_mode") + path := filepath.Join(dir, "file") + + f, err := os.Create(path) + require.NoError(t, err) + 
require.NoError(t, f.Close()) + + out := strings.TrimSpace(runCmd(t, "stat", "-c", "%a %s", path)) + require.Equal(t, "644 0", out, "stat -c '%%a %%s' on a fresh file") + }) + + t.Run("cp_file_in", func(t *testing.T) { + requireTool(t, "cp") + dir := workdir(t, "cp_file_in") + + src := filepath.Join(node.Dir, "cp_file_in_src") + want := randBytes(t, payloadSize) + require.NoError(t, os.WriteFile(src, want, 0o644)) + + dst := filepath.Join(dir, "cp-in") + runCmd(t, "cp", src, dst) + + got, err := os.ReadFile(dst) + require.NoError(t, err) + require.True(t, bytes.Equal(want, got), "FUSE read-back differs") + + // Cross-verify via daemon. ipfs files read can return huge + // blobs; compare lengths first to fail fast. + daemonView := node.IPFS("files", "read", "/cp_file_in/cp-in").Stdout.Bytes() + require.Equal(t, len(want), len(daemonView), "daemon view length") + require.True(t, bytes.Equal(want, daemonView), "daemon view content") + }) + + t.Run("cp_r_tree_in", func(t *testing.T) { + requireTool(t, "cp") + dir := workdir(t, "cp_r_tree_in") + + // Build the source tree under node.Dir. + srcRoot := filepath.Join(node.Dir, "cp_r_tree_in_src") + require.NoError(t, os.MkdirAll(filepath.Join(srcRoot, "a", "b", "c"), 0o755)) + + topData := randBytes(t, payloadSize) + leafData := randBytes(t, payloadSize) + require.NoError(t, os.WriteFile(filepath.Join(srcRoot, "top.bin"), topData, 0o644)) + require.NoError(t, os.WriteFile(filepath.Join(srcRoot, "a", "b", "c", "leaf.bin"), leafData, 0o644)) + + runCmd(t, "cp", "-r", srcRoot, dir+"/") + + // Walk the FUSE side and assert both files match. 
+ gotTop, err := os.ReadFile(filepath.Join(dir, "cp_r_tree_in_src", "top.bin")) + require.NoError(t, err) + require.True(t, bytes.Equal(topData, gotTop), "top file content") + + gotLeaf, err := os.ReadFile(filepath.Join(dir, "cp_r_tree_in_src", "a", "b", "c", "leaf.bin")) + require.NoError(t, err) + require.True(t, bytes.Equal(leafData, gotLeaf), "leaf file content") + + // Cross-verify the deepest file via the daemon. + daemonView := node.IPFS("files", "read", + "/cp_r_tree_in/cp_r_tree_in_src/a/b/c/leaf.bin").Stdout.Bytes() + require.True(t, bytes.Equal(leafData, daemonView), "daemon view of deepest leaf") + }) + + t.Run("cp_file_out", func(t *testing.T) { + requireTool(t, "cp") + dir := workdir(t, "cp_file_out") + + want := randBytes(t, payloadSize) + src := filepath.Join(dir, "payload") + require.NoError(t, os.WriteFile(src, want, 0o644)) + + dst := filepath.Join(node.Dir, "cp_file_out_dst") + runCmd(t, "cp", src, dst) + + got, err := os.ReadFile(dst) + require.NoError(t, err) + require.True(t, bytes.Equal(want, got), "exported file content") + }) + + t.Run("mv_atomic_save", func(t *testing.T) { + requireTool(t, "mv") + dir := workdir(t, "mv_atomic_save") + + oldData := randBytes(t, payloadSize) + newData := randBytes(t, payloadSize) + + target := filepath.Join(dir, "target") + tmp := filepath.Join(dir, ".target.tmp") + + require.NoError(t, os.WriteFile(target, oldData, 0o644)) + require.NoError(t, os.WriteFile(tmp, newData, 0o644)) + + runCmd(t, "mv", tmp, target) + + got, err := os.ReadFile(target) + require.NoError(t, err) + require.True(t, bytes.Equal(newData, got), "target should now hold new data") + + _, err = os.Stat(tmp) + require.True(t, os.IsNotExist(err), "tmp should be gone after mv") + }) + + t.Run("rm_rf_tree", func(t *testing.T) { + requireTool(t, "cp", "rm") + dir := workdir(t, "rm_rf_tree") + + // Build a tree the same shape as cp_r_tree_in. 
+ srcRoot := filepath.Join(node.Dir, "rm_rf_tree_src") + require.NoError(t, os.MkdirAll(filepath.Join(srcRoot, "a", "b", "c"), 0o755)) + require.NoError(t, os.WriteFile(filepath.Join(srcRoot, "top.bin"), randBytes(t, payloadSize), 0o644)) + require.NoError(t, os.WriteFile(filepath.Join(srcRoot, "a", "b", "c", "leaf.bin"), randBytes(t, payloadSize), 0o644)) + + runCmd(t, "cp", "-r", srcRoot, dir+"/") + copied := filepath.Join(dir, "rm_rf_tree_src") + + // Sanity: tree exists. + _, err := os.Stat(filepath.Join(copied, "a", "b", "c", "leaf.bin")) + require.NoError(t, err) + + runCmd(t, "rm", "-rf", copied) + + _, err = os.Stat(copied) + require.True(t, os.IsNotExist(err), "copied tree should be gone") + + // Cross-verify the daemon no longer lists the subtree. + listing := node.IPFS("files", "ls", "/rm_rf_tree").Stdout.String() + require.NotContains(t, listing, "rm_rf_tree_src", + "ipfs files ls should not see the removed subtree") + }) + + t.Run("ln_s_and_readlink", func(t *testing.T) { + requireTool(t, "ln", "readlink", "ls") + dir := workdir(t, "ln_s_and_readlink") + link := filepath.Join(dir, "link") + + runCmd(t, "ln", "-s", "/tmp/some/target", link) + + target := strings.TrimSpace(runCmd(t, "readlink", link)) + require.Equal(t, "/tmp/some/target", target) + + // ls -l on a symlink starts with 'l'. + lsOut := runCmd(t, "ls", "-l", link) + require.True(t, strings.HasPrefix(lsOut, "l"), + "ls -l output should start with 'l' for a symlink, got: %q", lsOut) + + // Daemon view: ipfs files stat reports symlinks via the Mode + // field (lrwxrwxrwx). The Type field is "file" because MFS + // stores symlinks as TFile/TSymlink under the hood. 
+ stat := node.IPFS("files", "stat", "/ln_s_and_readlink/link").Stdout.String() + require.Contains(t, stat, "lrwxrwxrwx", + "ipfs files stat mode should be lrwxrwxrwx for a symlink, got: %s", stat) + }) + + t.Run("find_traversal", func(t *testing.T) { + requireTool(t, "find", "ln") + dir := workdir(t, "find_traversal") + + require.NoError(t, os.WriteFile(filepath.Join(dir, "regular"), randBytes(t, payloadSize), 0o644)) + require.NoError(t, os.Mkdir(filepath.Join(dir, "subdir"), 0o755)) + runCmd(t, "ln", "-s", "regular", filepath.Join(dir, "link")) + + // strings.Fields splits on any whitespace; this is safe here + // because every test filename is ASCII with no spaces. If a + // future maintainer adds a filename with whitespace, switch + // to `find -print0` and split on '\x00' instead. + + // -type f should find exactly the regular file. + files := strings.Fields(runCmd(t, "find", dir, "-type", "f")) + require.Equal(t, []string{filepath.Join(dir, "regular")}, files) + + // -type d should find dir itself plus subdir. + dirs := strings.Fields(runCmd(t, "find", dir, "-type", "d")) + require.ElementsMatch(t, []string{dir, filepath.Join(dir, "subdir")}, dirs) + + // -type l should find exactly the symlink. + links := strings.Fields(runCmd(t, "find", dir, "-type", "l")) + require.Equal(t, []string{filepath.Join(dir, "link")}, links) + }) + + t.Run("dd_block_write", func(t *testing.T) { + requireTool(t, "dd") + dir := workdir(t, "dd_block_write") + path := filepath.Join(dir, "blob") + + // 4096 * 257 = 1052672 bytes, just past the 1 MiB chunk + // boundary. Uses /dev/urandom to avoid pulling all-zero + // pages from the kernel cache. 
+ runCmd(t, "dd", + "if=/dev/urandom", + "of="+path, + "bs=4096", + "count=257", + "status=none", + ) + + info, err := os.Stat(path) + require.NoError(t, err) + require.Equal(t, int64(4096*257), info.Size()) + }) + + t.Run("sha256sum_roundtrip", func(t *testing.T) { + requireTool(t, "sha256sum") + dir := workdir(t, "sha256sum_roundtrip") + path := filepath.Join(dir, "blob") + + want := randBytes(t, payloadSize) + require.NoError(t, os.WriteFile(path, want, 0o644)) + + hash := sha256.Sum256(want) + wantHex := hex.EncodeToString(hash[:]) + + out := runCmd(t, "sha256sum", path) + // `sha256sum` prints " ". + gotHex := strings.Fields(out)[0] + require.Equal(t, wantHex, gotHex, + "sha256sum on FUSE-read bytes should match the bytes we wrote") + }) + + // ----- Archives ----- + + t.Run("tar_extract_into_mfs", func(t *testing.T) { + requireTool(t, "tar") + dir := workdir(t, "tar_extract_into_mfs") + + // Build the source tree and tar it up under node.Dir. + srcRoot := filepath.Join(node.Dir, "tar_extract_src") + require.NoError(t, os.MkdirAll(filepath.Join(srcRoot, "sub"), 0o755)) + oneData := randBytes(t, payloadSize) + twoData := randBytes(t, payloadSize) + require.NoError(t, os.WriteFile(filepath.Join(srcRoot, "one.bin"), oneData, 0o644)) + require.NoError(t, os.WriteFile(filepath.Join(srcRoot, "sub", "two.bin"), twoData, 0o644)) + + tarPath := filepath.Join(node.Dir, "tar_extract.tar") + runCmd(t, "tar", "-cf", tarPath, "-C", node.Dir, "tar_extract_src") + + // Extract into the FUSE mount. 
+ runCmd(t, "tar", "-xf", tarPath, "-C", dir) + + extracted := filepath.Join(dir, "tar_extract_src") + gotOne, err := os.ReadFile(filepath.Join(extracted, "one.bin")) + require.NoError(t, err) + require.True(t, bytes.Equal(oneData, gotOne), "one.bin content") + + gotTwo, err := os.ReadFile(filepath.Join(extracted, "sub", "two.bin")) + require.NoError(t, err) + require.True(t, bytes.Equal(twoData, gotTwo), "two.bin content") + }) + + t.Run("tar_create_from_mfs", func(t *testing.T) { + requireTool(t, "tar") + dir := workdir(t, "tar_create_from_mfs") + + // Populate a small tree under the FUSE mount. + srcRoot := filepath.Join(dir, "src") + require.NoError(t, os.MkdirAll(filepath.Join(srcRoot, "sub"), 0o755)) + oneData := randBytes(t, payloadSize) + twoData := randBytes(t, payloadSize) + require.NoError(t, os.WriteFile(filepath.Join(srcRoot, "one.bin"), oneData, 0o644)) + require.NoError(t, os.WriteFile(filepath.Join(srcRoot, "sub", "two.bin"), twoData, 0o644)) + + // tar it up *from* the mount. + tarPath := filepath.Join(node.Dir, "tar_create.tar") + runCmd(t, "tar", "-cf", tarPath, "-C", dir, "src") + + // tar listing should include both leaves. + listing := runCmd(t, "tar", "-tf", tarPath) + require.Contains(t, listing, "src/one.bin") + require.Contains(t, listing, "src/sub/two.bin") + + // Extract back to a fresh dir off the mount and byte-compare. 
+ extractDir := filepath.Join(node.Dir, "tar_create_extract") + require.NoError(t, os.MkdirAll(extractDir, 0o755)) + runCmd(t, "tar", "-xf", tarPath, "-C", extractDir) + + gotOne, err := os.ReadFile(filepath.Join(extractDir, "src", "one.bin")) + require.NoError(t, err) + require.True(t, bytes.Equal(oneData, gotOne), "one.bin survives tar round-trip") + + gotTwo, err := os.ReadFile(filepath.Join(extractDir, "src", "sub", "two.bin")) + require.NoError(t, err) + require.True(t, bytes.Equal(twoData, gotTwo), "two.bin survives tar round-trip") + }) + + // ----- Rsync ----- + + t.Run("rsync_archive_in", func(t *testing.T) { + requireTool(t, "rsync") + dir := workdir(t, "rsync_archive_in") + + // Build a tree under node.Dir with a known mode and mtime. + srcRoot := filepath.Join(node.Dir, "rsync_archive_src") + require.NoError(t, os.MkdirAll(filepath.Join(srcRoot, "sub"), 0o755)) + + oneData := randBytes(t, payloadSize) + twoData := randBytes(t, payloadSize) + onePath := filepath.Join(srcRoot, "one.bin") + twoPath := filepath.Join(srcRoot, "sub", "two.bin") + require.NoError(t, os.WriteFile(onePath, oneData, 0o640)) + require.NoError(t, os.WriteFile(twoPath, twoData, 0o640)) + + mtime := time.Date(2025, 6, 15, 12, 0, 0, 0, time.UTC) + require.NoError(t, os.Chtimes(onePath, mtime, mtime)) + require.NoError(t, os.Chtimes(twoPath, mtime, mtime)) + + // Trailing slash on source: copy the contents of srcRoot, + // not the directory itself. Mirrors typical rsync usage. + runCmd(t, "rsync", "-a", srcRoot+"/", dir+"/copy/") + + gotOne, err := os.ReadFile(filepath.Join(dir, "copy", "one.bin")) + require.NoError(t, err) + require.True(t, bytes.Equal(oneData, gotOne), "one.bin content") + + gotTwo, err := os.ReadFile(filepath.Join(dir, "copy", "sub", "two.bin")) + require.NoError(t, err) + require.True(t, bytes.Equal(twoData, gotTwo), "two.bin content") + + // Mode preserved (StoreMode is enabled at the daemon level). 
+ oneInfo, err := os.Stat(filepath.Join(dir, "copy", "one.bin")) + require.NoError(t, err) + require.Equal(t, os.FileMode(0o640), oneInfo.Mode().Perm(), + "mode should be preserved through rsync -a") + + // Mtime preserved (StoreMtime is enabled at the daemon level). + require.WithinDuration(t, mtime, oneInfo.ModTime(), time.Second, + "mtime should be preserved through rsync -a") + }) + + t.Run("rsync_inplace_overwrite", func(t *testing.T) { + requireTool(t, "rsync") + dir := workdir(t, "rsync_inplace_overwrite") + + // Initial file is larger than the replacement so the inplace + // path has to truncate the tail. + initial := randBytes(t, payloadSize+4096) + dst := filepath.Join(dir, "inplace") + require.NoError(t, os.WriteFile(dst, initial, 0o644)) + + replacement := randBytes(t, payloadSize) + src := filepath.Join(node.Dir, "rsync_inplace_replacement") + require.NoError(t, os.WriteFile(src, replacement, 0o644)) + + runCmd(t, "rsync", "--inplace", src, dst) + + got, err := os.ReadFile(dst) + require.NoError(t, err) + require.Equal(t, len(replacement), len(got), + "file size should shrink to replacement size after --inplace") + require.True(t, bytes.Equal(replacement, got), + "content should match the replacement after --inplace") + }) + + // ----- Editor ----- + + t.Run("vim_edit_file", func(t *testing.T) { + requireTool(t, "vim") + dir := workdir(t, "vim_edit_file") + path := filepath.Join(dir, "edit.txt") + + // Build a multi-chunk file: a header line followed by enough + // "world" repeats to push the total size past one UnixFS chunk. + const word = "world\n" + repeats := payloadSize/len(word) + 1 + var buf bytes.Buffer + buf.WriteString("header\n") + for range repeats { + buf.WriteString(word) + } + original := buf.Bytes() + require.NoError(t, os.WriteFile(path, original, 0o644)) + require.Greater(t, len(original), payloadSize, "file should span multiple chunks") + + // Vim in headless ex mode: substitute world->fuse globally, + // write, quit. 
-E selects ex mode, -s suppresses prompts. + runCmd(t, "vim", "-E", "-s", + "-c", "%s/world/fuse/g", + "-c", "wq", + path, + ) + + got, err := os.ReadFile(path) + require.NoError(t, err) + require.NotContains(t, string(got), "world", + "after :%%s/world/fuse/g the file should contain no 'world'") + gotFuses := bytes.Count(got, []byte("fuse")) + require.Equal(t, repeats, gotFuses, + "the substitution should have replaced exactly %d occurrences", repeats) + + // Cross-verify via daemon. + daemonView := node.IPFS("files", "read", "/vim_edit_file/edit.txt").Stdout.Bytes() + require.True(t, bytes.Equal(got, daemonView), + "daemon view should match FUSE view after vim save") + }) +} diff --git a/test/cli/fuse/xattr_linux_test.go b/test/cli/fuse/xattr_linux_test.go new file mode 100644 index 00000000000..6e09b70172f --- /dev/null +++ b/test/cli/fuse/xattr_linux_test.go @@ -0,0 +1,15 @@ +// Uses unix.Getxattr which is only available on Linux. +//go:build linux + +package fuse + +import "golang.org/x/sys/unix" + +func getXattr(path, attr string) (string, error) { + buf := make([]byte, 256) + sz, err := unix.Getxattr(path, attr, buf) + if err != nil { + return "", err + } + return string(buf[:sz]), nil +} diff --git a/test/cli/fuse/xattr_other_test.go b/test/cli/fuse/xattr_other_test.go new file mode 100644 index 00000000000..3f2a4d9a744 --- /dev/null +++ b/test/cli/fuse/xattr_other_test.go @@ -0,0 +1,13 @@ +// Stub that skips xattr tests on non-Linux platforms. 
+//go:build !linux + +package fuse + +import ( + "fmt" + "runtime" +) + +func getXattr(_, _ string) (string, error) { + return "", fmt.Errorf("xattr not supported on %s", runtime.GOOS) +} diff --git a/test/cli/fuse_test.go b/test/cli/fuse_test.go deleted file mode 100644 index 6182a069a9e..00000000000 --- a/test/cli/fuse_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package cli - -import ( - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "testing" - - "github.com/ipfs/kubo/test/cli/harness" - "github.com/ipfs/kubo/test/cli/testutils" - "github.com/stretchr/testify/require" -) - -func TestFUSE(t *testing.T) { - testutils.RequiresFUSE(t) - t.Parallel() - - t.Run("mount and unmount work correctly", func(t *testing.T) { - t.Parallel() - - // Create a node and start daemon - node := harness.NewT(t).NewNode().Init() - node.StartDaemon() - - // Create mount directories in the node's working directory - nodeDir := node.Dir - ipfsMount := filepath.Join(nodeDir, "ipfs") - ipnsMount := filepath.Join(nodeDir, "ipns") - mfsMount := filepath.Join(nodeDir, "mfs") - - err := os.MkdirAll(ipfsMount, 0755) - require.NoError(t, err) - err = os.MkdirAll(ipnsMount, 0755) - require.NoError(t, err) - err = os.MkdirAll(mfsMount, 0755) - require.NoError(t, err) - - // Ensure any existing mounts are cleaned up first - failOnError := false // mount points might not exist from previous runs - doUnmount(t, ipfsMount, failOnError) - doUnmount(t, ipnsMount, failOnError) - doUnmount(t, mfsMount, failOnError) - - // Test mount operation - result := node.IPFS("mount", "-f", ipfsMount, "-n", ipnsMount, "-m", mfsMount) - - // Verify mount output - expectedOutput := "IPFS mounted at: " + ipfsMount + "\n" + - "IPNS mounted at: " + ipnsMount + "\n" + - "MFS mounted at: " + mfsMount + "\n" - require.Equal(t, expectedOutput, result.Stdout.String()) - - // Test basic MFS functionality via FUSE mount - testFile := filepath.Join(mfsMount, "testfile") - testContent := "hello fuse world" - - // Create 
file via FUSE mount - err = os.WriteFile(testFile, []byte(testContent), 0644) - require.NoError(t, err) - - // Verify file appears in MFS via IPFS commands - result = node.IPFS("files", "ls", "/") - require.Contains(t, result.Stdout.String(), "testfile") - - // Read content back via MFS FUSE mount - readContent, err := os.ReadFile(testFile) - require.NoError(t, err) - require.Equal(t, testContent, string(readContent)) - - // Get the CID of the MFS file - result = node.IPFS("files", "stat", "/testfile", "--format=") - fileCID := strings.TrimSpace(result.Stdout.String()) - require.NotEmpty(t, fileCID, "should have a CID for the MFS file") - - // Read the same content via IPFS FUSE mount using the CID - ipfsFile := filepath.Join(ipfsMount, fileCID) - ipfsContent, err := os.ReadFile(ipfsFile) - require.NoError(t, err) - require.Equal(t, testContent, string(ipfsContent), "content should match between MFS and IPFS mounts") - - // Verify both FUSE mounts return identical data - require.Equal(t, readContent, ipfsContent, "MFS and IPFS FUSE mounts should return identical data") - - // Test that mount directories cannot be removed while mounted - err = os.Remove(ipfsMount) - require.Error(t, err, "should not be able to remove mounted directory") - - // Stop daemon - this should trigger automatic unmount via context cancellation - node.StopDaemon() - - // Daemon shutdown should handle unmount synchronously via context.AfterFunc - - // Verify directories can now be removed (indicating successful unmount) - err = os.Remove(ipfsMount) - require.NoError(t, err, "should be able to remove directory after unmount") - err = os.Remove(ipnsMount) - require.NoError(t, err, "should be able to remove directory after unmount") - err = os.Remove(mfsMount) - require.NoError(t, err, "should be able to remove directory after unmount") - }) - - t.Run("explicit unmount works", func(t *testing.T) { - t.Parallel() - - node := harness.NewT(t).NewNode().Init() - node.StartDaemon() - - // Create 
mount directories - nodeDir := node.Dir - ipfsMount := filepath.Join(nodeDir, "ipfs") - ipnsMount := filepath.Join(nodeDir, "ipns") - mfsMount := filepath.Join(nodeDir, "mfs") - - err := os.MkdirAll(ipfsMount, 0755) - require.NoError(t, err) - err = os.MkdirAll(ipnsMount, 0755) - require.NoError(t, err) - err = os.MkdirAll(mfsMount, 0755) - require.NoError(t, err) - - // Clean up any existing mounts - failOnError := false // mount points might not exist from previous runs - doUnmount(t, ipfsMount, failOnError) - doUnmount(t, ipnsMount, failOnError) - doUnmount(t, mfsMount, failOnError) - - // Mount - node.IPFS("mount", "-f", ipfsMount, "-n", ipnsMount, "-m", mfsMount) - - // Explicit unmount via platform-specific command - failOnError = true // test that explicit unmount works correctly - doUnmount(t, ipfsMount, failOnError) - doUnmount(t, ipnsMount, failOnError) - doUnmount(t, mfsMount, failOnError) - - // Verify directories can be removed after explicit unmount - err = os.Remove(ipfsMount) - require.NoError(t, err) - err = os.Remove(ipnsMount) - require.NoError(t, err) - err = os.Remove(mfsMount) - require.NoError(t, err) - - node.StopDaemon() - }) -} - -// doUnmount performs platform-specific unmount, similar to sharness do_umount -// failOnError: if true, unmount errors cause test failure; if false, errors are ignored (useful for cleanup) -func doUnmount(t *testing.T, mountPoint string, failOnError bool) { - t.Helper() - var cmd *exec.Cmd - if runtime.GOOS == "linux" { - // fusermount -u: unmount filesystem (strict - fails if busy) - cmd = exec.Command("fusermount", "-u", mountPoint) - } else { - cmd = exec.Command("umount", mountPoint) - } - - err := cmd.Run() - if err != nil && failOnError { - t.Fatalf("failed to unmount %s: %v", mountPoint, err) - } -} diff --git a/test/cli/harness/dht_stub_peers.go b/test/cli/harness/dht_stub_peers.go new file mode 100644 index 00000000000..c97588ede28 --- /dev/null +++ b/test/cli/harness/dht_stub_peers.go @@ -0,0 +1,145 @@ 
+package harness + +import ( + "context" + "encoding/hex" + "sync" + + "github.com/libp2p/go-libp2p" + dht "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p-kad-dht/records" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" +) + +// stubPeerPool manages ephemeral in-process libp2p/DHT peers for +// TEST_DHT_STUB mode. +// +// All peers share a single in-memory ProviderStore. This store is +// NOT shared with the kubo daemons; it lives in the test process. +// When a kubo daemon sends ADD_PROVIDER to any ephemeral peer, the +// record is stored in this shared store. When another kubo daemon +// queries GET_PROVIDERS from any peer, it finds the record because +// all peers see the same store. The kubo daemons communicate with +// the ephemeral peers via real DHT protocol messages over loopback +// TCP. +type stubPeerPool struct { + hosts []host.Host + dhts []*dht.IpfsDHT + store *sharedMemStore + cancel context.CancelFunc +} + +// stubDHTPeerCount is the number of ephemeral DHT peers to create. +// Matches amino.DefaultBucketSize (K=20 in Kademlia), ensuring +// GetClosestPeers always finds enough peers for provide replication. +const stubDHTPeerCount = 20 + +// newStubPeerPool creates count ephemeral DHT peers on loopback and +// mesh-connects them. 
+func newStubPeerPool(count int) (*stubPeerPool, error) { + ctx, cancel := context.WithCancel(context.Background()) + + store := &sharedMemStore{data: make(map[string][]peer.AddrInfo)} + + hosts := make([]host.Host, 0, count) + dhts := make([]*dht.IpfsDHT, 0, count) + + cleanup := func() { + for _, d := range dhts { + d.Close() + } + for _, h := range hosts { + h.Close() + } + cancel() + } + + for range count { + h, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0")) + if err != nil { + cleanup() + return nil, err + } + d, err := dht.New(ctx, h, + dht.Mode(dht.ModeServer), + dht.ProviderStore(store), + dht.AddressFilter(nil), + dht.DisableAutoRefresh(), + dht.BootstrapPeers(), + ) + if err != nil { + h.Close() + cleanup() + return nil, err + } + hosts = append(hosts, h) + dhts = append(dhts, d) + } + + // Full-mesh connect so routing tables are populated. + for i, h := range hosts { + for j, other := range hosts { + if i == j { + continue + } + ai := peer.AddrInfo{ID: other.ID(), Addrs: other.Addrs()} + if err := h.Connect(ctx, ai); err != nil { + cleanup() + return nil, err + } + } + } + + return &stubPeerPool{ + hosts: hosts, + dhts: dhts, + store: store, + cancel: cancel, + }, nil +} + +func (p *stubPeerPool) Close() { + if p == nil { + return + } + for _, d := range p.dhts { + d.Close() + } + for _, h := range p.hosts { + h.Close() + } + p.cancel() +} + +// sharedMemStore implements records.ProviderStore with a shared +// in-memory map. All ephemeral peers reference the same instance +// so any peer can answer provider queries for any CID. 
+type sharedMemStore struct { + mu sync.RWMutex + data map[string][]peer.AddrInfo +} + +var _ records.ProviderStore = (*sharedMemStore)(nil) + +func (s *sharedMemStore) AddProvider(_ context.Context, key []byte, prov peer.AddrInfo) error { + h := hex.EncodeToString(key) + s.mu.Lock() + defer s.mu.Unlock() + for _, existing := range s.data[h] { + if existing.ID == prov.ID { + return nil + } + } + s.data[h] = append(s.data[h], prov) + return nil +} + +func (s *sharedMemStore) GetProviders(_ context.Context, key []byte) ([]peer.AddrInfo, error) { + h := hex.EncodeToString(key) + s.mu.RLock() + defer s.mu.RUnlock() + return s.data[h], nil +} + +func (s *sharedMemStore) Close() error { return nil } diff --git a/test/cli/harness/harness.go b/test/cli/harness/harness.go index 83784ec4048..dfbe700296e 100644 --- a/test/cli/harness/harness.go +++ b/test/cli/harness/harness.go @@ -22,6 +22,7 @@ type Harness struct { Runner *Runner NodesRoot string Nodes Nodes + stubPeers *stubPeerPool // ephemeral DHT peers for TEST_DHT_STUB mode } // TODO: use zaptest.NewLogger(t) instead @@ -73,6 +74,40 @@ func New(options ...func(h *Harness)) *Harness { return h } +// BootstrapWithStubDHT configures each node to bootstrap from +// ephemeral in-process DHT peers on loopback instead of the public +// swarm. Call after Init() and before StartDaemon(). +// +// Creates 20 ephemeral DHT peers lazily on the first call, shared +// across all nodes in this harness. Sets TEST_DHT_STUB on each +// node's environment so the daemon lifts WAN DHT filters to accept +// loopback peers. Peers are shut down in Cleanup(). +// +// The sweep provider needs >=20 DHT peers to estimate the network +// size (prefix length). Without enough peers it stays offline and +// never provides. 
+func (h *Harness) BootstrapWithStubDHT(nodes Nodes) { + if h.stubPeers == nil { + pool, err := newStubPeerPool(stubDHTPeerCount) + if err != nil { + log.Panicf("creating stub peer pool: %s", err) + } + h.stubPeers = pool + } + var addrs []string + for _, host := range h.stubPeers.hosts { + for _, addr := range host.Addrs() { + addrs = append(addrs, addr.String()+"/p2p/"+host.ID().String()) + } + } + for _, node := range nodes { + node.SetIPFSConfig("Bootstrap", addrs) + // Tell the daemon to lift WAN DHT filters so loopback + // ephemeral peers enter the WAN routing table. + node.Runner.Env["TEST_DHT_STUB"] = "1" + } +} + func osEnviron() map[string]string { m := map[string]string{} for _, entry := range os.Environ() { @@ -183,7 +218,7 @@ func (h *Harness) Sh(expr string) *RunResult { func (h *Harness) Cleanup() { log.Debugf("cleaning up cluster") h.Nodes.StopDaemons() - // TODO: don't do this if test fails, not sure how? + h.stubPeers.Close() log.Debugf("removing harness dir") err := os.RemoveAll(h.Dir) if err != nil { diff --git a/test/cli/harness/node.go b/test/cli/harness/node.go index a4ee71f937f..7c152bd190f 100644 --- a/test/cli/harness/node.go +++ b/test/cli/harness/node.go @@ -248,7 +248,7 @@ func (n *Node) Init(ipfsArgs ...string) *Node { // Telemetry disabled by default in tests. cfg.Plugins = config.Plugins{ Plugins: map[string]config.Plugin{ - "telemetry": config.Plugin{ + "telemetry": { Disabled: true, }, }, @@ -303,7 +303,10 @@ func (n *Node) StartDaemonWithAuthorization(secret string, ipfsArgs ...string) * func (n *Node) signalAndWait(watch <-chan struct{}, signal os.Signal, t time.Duration) bool { err := n.Daemon.Cmd.Process.Signal(signal) if err != nil { - if errors.Is(err, os.ErrProcessDone) { + // On Windows, Process.Wait() sets the handle state to "released" + // rather than "done", so a subsequent Signal() returns EINVAL + // instead of ErrProcessDone. Treat both as "already exited". 
+ if errors.Is(err, os.ErrProcessDone) || errors.Is(err, syscall.EINVAL) { log.Debugf("process for node %d has already finished", n.ID) return true } @@ -739,6 +742,12 @@ func (n *Node) DatastoreCount(prefix string) int64 { return count } +// DatastorePut writes a key-value pair to the datastore. +// Requires the daemon to be stopped. +func (n *Node) DatastorePut(key, value string) { + n.IPFS("diag", "datastore", "put", key, value) +} + // DatastoreGet retrieves the value at the given key. // Requires the daemon to be stopped. Returns nil if key not found. func (n *Node) DatastoreGet(key string) []byte { diff --git a/test/cli/ipfswatch_test.go b/test/cli/ipfswatch_test.go index 898f799f45e..ce5798c6cd1 100644 --- a/test/cli/ipfswatch_test.go +++ b/test/cli/ipfswatch_test.go @@ -1,3 +1,4 @@ +// Excluded from plan9 (no fsnotify support). //go:build !plan9 package cli diff --git a/test/cli/key_test.go b/test/cli/key_test.go new file mode 100644 index 00000000000..d04cfcb6a59 --- /dev/null +++ b/test/cli/key_test.go @@ -0,0 +1,46 @@ +package cli + +import ( + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestKeyExportFilePermissions(t *testing.T) { + t.Parallel() + + if runtime.GOOS == "windows" { + t.Skip("Unix file permissions not applicable on Windows") + } + + node := harness.NewT(t).NewNode().Init() + + node.IPFS("key", "gen", "--type=ed25519", "testkey") + + t.Run("libp2p-protobuf-cleartext format", func(t *testing.T) { + t.Parallel() + exportPath := filepath.Join(t.TempDir(), "testkey.key") + node.IPFS("key", "export", "testkey", "-o", exportPath) + + info, err := os.Stat(exportPath) + require.NoError(t, err) + assert.Equal(t, os.FileMode(0o600), info.Mode().Perm(), + "exported key file should have owner-only permissions") + }) + + t.Run("pem-pkcs8-cleartext format", func(t *testing.T) { + t.Parallel() + exportPath := 
filepath.Join(t.TempDir(), "testkey.pem") + node.IPFS("key", "export", "testkey", "-o", exportPath, "-f", "pem-pkcs8-cleartext") + + info, err := os.Stat(exportPath) + require.NoError(t, err) + assert.Equal(t, os.FileMode(0o600), info.Mode().Perm(), + "exported PEM key file should have owner-only permissions") + }) +} diff --git a/test/cli/migrations/migration_17_to_latest_test.go b/test/cli/migrations/migration_17_to_latest_test.go index 53d6a0c5e30..287dbac50bb 100644 --- a/test/cli/migrations/migration_17_to_latest_test.go +++ b/test/cli/migrations/migration_17_to_latest_test.go @@ -213,8 +213,8 @@ func testInvalidStrategyMigration(t *testing.T) { outputStr := string(output) t.Logf("Daemon output with invalid strategy: %s", outputStr) - // The error should mention unknown strategy - require.Contains(t, outputStr, "unknown strategy", "Should report unknown strategy error") + // The error should mention unknown strategy token + require.Contains(t, outputStr, "unknown provide strategy token", "Should report unknown strategy error") } func testRepoProviderReproviderMigration(t *testing.T) { diff --git a/test/cli/pin_ls_names_test.go b/test/cli/pin_ls_names_test.go index 8c54a7a26a1..b21c2007849 100644 --- a/test/cli/pin_ls_names_test.go +++ b/test/cli/pin_ls_names_test.go @@ -509,6 +509,17 @@ func TestPinLsEdgeCases(t *testing.T) { require.Contains(t, res.Stderr.String(), "must be one of {direct, indirect, recursive, all}") }) + t.Run("known but non-listable pin type returns error", func(t *testing.T) { + t.Parallel() + node := setupTestNode(t) + + // "internal" is a valid pin.Mode in boxo but not a valid --type for pin ls. + // Before the fix, this caused a panic instead of returning an error. 
+ res := node.RunIPFS("pin", "ls", "--type=internal") + require.NotEqual(t, 0, res.ExitCode()) + require.Contains(t, res.Stderr.String(), "invalid type 'internal'") + }) + t.Run("non-existent path returns proper error", func(t *testing.T) { t.Parallel() node := setupTestNode(t) diff --git a/test/cli/provider_test.go b/test/cli/provider_test.go index a62ce99446e..d1756ae0503 100644 --- a/test/cli/provider_test.go +++ b/test/cli/provider_test.go @@ -6,6 +6,10 @@ import ( "fmt" "net/http" "net/http/httptest" + "os" + "path/filepath" + "regexp" + "strconv" "strings" "sync/atomic" "testing" @@ -19,27 +23,43 @@ import ( const ( timeStep = 20 * time.Millisecond - timeout = time.Second + timeout = 30 * time.Second ) type cfgApplier func(*harness.Node) -func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) { +// uniq appends a nanosecond timestamp to s, ensuring unique CIDs +// across test runs and parallel subtests. +func uniq(s string) string { + return s + " " + strconv.FormatInt(time.Now().UnixNano(), 10) +} + +// awaitReprovideFunc waits until at least minCIDs have been provided +// and returns the total number of CIDs provided so far. The returned +// count can be passed as minCIDs to a subsequent call to wait for the +// next reprovide cycle. 
+type awaitReprovideFunc func(t *testing.T, n *harness.Node, minCIDs int64) int64 + +func runProviderSuite(t *testing.T, sweep bool, apply cfgApplier, awaitReprovide awaitReprovideFunc) { t.Helper() initNodes := func(t *testing.T, n int, fn func(n *harness.Node)) harness.Nodes { - nodes := harness.NewT(t).NewNodes(n).Init() + h := harness.NewT(t) + nodes := h.NewNodes(n).Init() nodes.ForEachPar(apply) nodes.ForEachPar(fn) + h.BootstrapWithStubDHT(nodes) nodes = nodes.StartDaemons().Connect() time.Sleep(500 * time.Millisecond) // wait for DHT clients to be bootstrapped return nodes } initNodesWithoutStart := func(t *testing.T, n int, fn func(n *harness.Node)) harness.Nodes { - nodes := harness.NewT(t).NewNodes(n).Init() + h := harness.NewT(t) + nodes := h.NewNodes(n).Init() nodes.ForEachPar(apply) nodes.ForEachPar(fn) + h.BootstrapWithStubDHT(nodes) return nodes } @@ -228,93 +248,325 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) { expectNoProviders(t, cid, nodes[1:]...) }) - // It is a lesser evil - forces users to fix their config and have some sort of interval - t.Run("Manual Reprovide trigger does not work when periodic reprovide is disabled", func(t *testing.T) { + // `routing reprovide` is only available with the legacy provider. + // Sweep provider reprovides automatically on schedule. + if !sweep { + t.Run("Manual Reprovide trigger does not work when periodic reprovide is disabled", func(t *testing.T) { + t.Parallel() + + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.DHT.Interval", "0") + }) + defer nodes.StopDaemons() + + cid := nodes[0].IPFSAddStr(time.Now().String()) + + expectNoProviders(t, cid, nodes[1:]...) + + res := nodes[0].RunIPFS("routing", "reprovide") + assert.Contains(t, res.Stderr.Trimmed(), "invalid configuration: Provide.DHT.Interval is set to '0'") + assert.Equal(t, 1, res.ExitCode()) + + expectNoProviders(t, cid, nodes[1:]...) 
+ }) + + t.Run("Manual Reprovide trigger does not work when Provide system is disabled", func(t *testing.T) { + t.Parallel() + + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.Enabled", false) + }) + defer nodes.StopDaemons() + + cid := nodes[0].IPFSAddStr(time.Now().String()) + + expectNoProviders(t, cid, nodes[1:]...) + + res := nodes[0].RunIPFS("routing", "reprovide") + assert.Contains(t, res.Stderr.Trimmed(), "invalid configuration: Provide.Enabled is set to 'false'") + assert.Equal(t, 1, res.ExitCode()) + + expectNoProviders(t, cid, nodes[1:]...) + }) + } + + t.Run("Provide with 'all' strategy", func(t *testing.T) { t.Parallel() nodes := initNodes(t, 2, func(n *harness.Node) { - n.SetIPFSConfig("Provide.DHT.Interval", "0") + n.SetIPFSConfig("Provide.Strategy", "all") }) defer nodes.StopDaemons() + publisher := nodes[0] - cid := nodes[0].IPFSAddStr(time.Now().String()) + cid := publisher.IPFSAddStr(uniq("all strategy")) + expectProviders(t, cid, publisher.PeerID().String(), nodes[1:]...) + }) - expectNoProviders(t, cid, nodes[1:]...) + t.Run("Provide with 'pinned' strategy", func(t *testing.T) { + t.Parallel() - res := nodes[0].RunIPFS("routing", "reprovide") - assert.Contains(t, res.Stderr.Trimmed(), "invalid configuration: Provide.DHT.Interval is set to '0'") - assert.Equal(t, 1, res.ExitCode()) + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.Strategy", "pinned") + }) + defer nodes.StopDaemons() + publisher := nodes[0] + // Add a non-pinned CID (should not be provided) + cid := publisher.IPFSAddStr(uniq("pinned strategy"), "--pin=false") expectNoProviders(t, cid, nodes[1:]...) + + // Pin the CID (should now be provided) + publisher.IPFS("pin", "add", cid) + expectProviders(t, cid, publisher.PeerID().String(), nodes[1:]...) 
}) - // It is a lesser evil - forces users to fix their config and have some sort of interval - t.Run("Manual Reprovide trigger does not work when Provide system is disabled", func(t *testing.T) { + t.Run("Provide with 'pinned+mfs' strategy", func(t *testing.T) { t.Parallel() nodes := initNodes(t, 2, func(n *harness.Node) { - n.SetIPFSConfig("Provide.Enabled", false) + n.SetIPFSConfig("Provide.Strategy", "pinned+mfs") }) defer nodes.StopDaemons() + publisher := nodes[0] - cid := nodes[0].IPFSAddStr(time.Now().String()) + cidPinned := publisher.IPFSAddStr(uniq("pinned content")) + cidUnpinned := publisher.IPFSAddStr(uniq("unpinned content"), "--pin=false") + cidMFS := publisher.IPFSAddStr(uniq("mfs content"), "--pin=false") + publisher.IPFS("files", "cp", "/ipfs/"+cidMFS, "/myfile") - expectNoProviders(t, cid, nodes[1:]...) + expectProviders(t, cidPinned, publisher.PeerID().String(), nodes[1:]...) + expectNoProviders(t, cidUnpinned, nodes[1:]...) + expectProviders(t, cidMFS, publisher.PeerID().String(), nodes[1:]...) + }) - res := nodes[0].RunIPFS("routing", "reprovide") - assert.Contains(t, res.Stderr.Trimmed(), "invalid configuration: Provide.Enabled is set to 'false'") - assert.Equal(t, 1, res.ExitCode()) + // addLargeFileInSubdir adds a 2 MiB file inside /subdir/ in MFS and + // returns the MFS root CID, the file root CID, and a chunk CID. + // The file is large enough to be split into multiple blocks. + // The resulting DAG: root-dir/subdir/largefile (2+ chunks). 
+ addLargeFileInSubdir := func(t *testing.T, publisher *harness.Node) (cidRoot, cidSubdir, cidFile, cidChunk string) { + t.Helper() + largeData := random.Bytes(2 * 1024 * 1024) // 2 MiB = 2 chunks at 1 MiB + + // Add file without pinning, then build directory structure in MFS + cidFile = publisher.IPFSAdd(bytes.NewReader(largeData), "-Q", "--pin=false") + publisher.IPFS("files", "mkdir", "-p", "/subdir") + publisher.IPFS("files", "cp", "/ipfs/"+cidFile, "/subdir/largefile") + + // Get CIDs for the directory structure + cidRoot = publisher.IPFS("files", "stat", "--hash", "/").Stdout.Trimmed() + cidSubdir = publisher.IPFS("files", "stat", "--hash", "/subdir").Stdout.Trimmed() + + // Get a chunk CID from the file's DAG links + dagOut := publisher.IPFS("dag", "get", cidFile) + var dagNode struct { + Links []struct { + Hash map[string]string `json:"Hash"` + } `json:"Links"` + } + require.NoError(t, json.Unmarshal(dagOut.Stdout.Bytes(), &dagNode)) + require.Greater(t, len(dagNode.Links), 1, "file should have multiple chunks") + cidChunk = dagNode.Links[0].Hash["/"] + require.NotEmpty(t, cidChunk) - expectNoProviders(t, cid, nodes[1:]...) + return cidRoot, cidSubdir, cidFile, cidChunk + } + + // +unique and +entities tests verify which CIDs end up in the DHT + // (strategy scope). Bloom filter deduplication correctness and + // entity type detection are tested in boxo/dag/walker/*_test.go. + + t.Run("Provide with 'pinned+mfs+unique' strategy", func(t *testing.T) { + t.Parallel() + + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.Strategy", "pinned+mfs+unique") + n.SetIPFSConfig("Import.UnixFSChunker", "size-1048576") // 1 MiB chunks + }) + defer nodes.StopDaemons() + publisher, peers := nodes[0], nodes[1:] + + // +unique provides all blocks in pinned DAGs (same scope as + // pinned+mfs but with bloom filter dedup across pins). 
+ // Use --fast-provide-dag and --fast-provide-wait on pin add + // so we can verify which blocks the strategy includes. + cidRoot, cidSubdir, cidFile, cidChunk := addLargeFileInSubdir(t, publisher) + publisher.IPFS("pin", "add", "--fast-provide-dag", "--fast-provide-wait", cidRoot) + cidUnpinned := publisher.IPFSAddStr(uniq("unpinned content"), "--pin=false") + + pid := publisher.PeerID().String() + // All blocks in the pinned DAG should be provided (including chunks) + expectProviders(t, cidRoot, pid, peers...) + expectProviders(t, cidSubdir, pid, peers...) + expectProviders(t, cidFile, pid, peers...) + expectProviders(t, cidChunk, pid, peers...) + expectNoProviders(t, cidUnpinned, peers...) }) - t.Run("Provide with 'all' strategy", func(t *testing.T) { + t.Run("Provide with 'pinned+mfs+entities' strategy", func(t *testing.T) { t.Parallel() nodes := initNodes(t, 2, func(n *harness.Node) { - n.SetIPFSConfig("Provide.Strategy", "all") + n.SetIPFSConfig("Provide.Strategy", "pinned+mfs+entities") + n.SetIPFSConfig("Import.UnixFSChunker", "size-1048576") // 1 MiB chunks }) defer nodes.StopDaemons() + publisher, peers := nodes[0], nodes[1:] + + // +entities provides only entity roots (files, directories, + // HAMT shards) and skips internal file chunks. + // Use --fast-provide-dag and --fast-provide-wait on pin add + // so we can verify which blocks the strategy skips. + cidRoot, cidSubdir, cidFile, cidChunk := addLargeFileInSubdir(t, publisher) + publisher.IPFS("pin", "add", "--fast-provide-dag", "--fast-provide-wait", cidRoot) + + pid := publisher.PeerID().String() + // Entity roots: directories and file root + expectProviders(t, cidRoot, pid, peers...) + expectProviders(t, cidSubdir, pid, peers...) + expectProviders(t, cidFile, pid, peers...) + // Internal chunk should NOT be provided (+entities skips chunks) + expectNoProviders(t, cidChunk, peers...) 
+ }) - cid := nodes[0].IPFSAddStr("all strategy") - expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) + t.Run("ipfs add --fast-provide-dag honors +entities (no chunk providing)", func(t *testing.T) { + t.Parallel() + + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.Strategy", "pinned+entities") + n.SetIPFSConfig("Import.UnixFSChunker", "size-1048576") // 1 MiB chunks + }) + defer nodes.StopDaemons() + publisher, peers := nodes[0], nodes[1:] + + // Regression test for the providingDagService double-providing + // path. Before the fix, ipfs add --pin --fast-provide-dag wrapped + // the DAGService with providingDagService, which announced every + // block as it was written -- including chunks -- regardless of + // the +entities modifier. The post-add ExecuteFastProvideDAG + // walk then ran in parallel, so chunks ended up in the DHT + // despite +entities saying they should be skipped. + // + // After the fix, ExecuteFastProvideDAG is the single mechanism + // for --fast-provide-dag and respects the active strategy. + largeData := random.Bytes(2 * 1024 * 1024) // 2 MiB = 2 chunks + cidFile := publisher.IPFSAdd(bytes.NewReader(largeData), + "--fast-provide-dag", "--fast-provide-wait") + + // Get a chunk CID from the file's DAG links + dagOut := publisher.IPFS("dag", "get", cidFile) + var dagNode struct { + Links []struct { + Hash map[string]string `json:"Hash"` + } `json:"Links"` + } + require.NoError(t, json.Unmarshal(dagOut.Stdout.Bytes(), &dagNode)) + require.Greater(t, len(dagNode.Links), 1, "file should have multiple chunks") + cidChunk := dagNode.Links[0].Hash["/"] + require.NotEmpty(t, cidChunk) + + pid := publisher.PeerID().String() + // File root (entity) should be provided + expectProviders(t, cidFile, pid, peers...) + // Chunk should NOT be provided (+entities skips chunks) + expectNoProviders(t, cidChunk, peers...) 
}) - t.Run("Provide with 'pinned' strategy", func(t *testing.T) { + // addLargeFilestoreFile writes a 2 MiB file to the publisher's + // node directory and adds it via --nocopy, returning the root CID + // and a chunk CID from the file's DAG links. With the configured + // 1 MiB chunker the file produces multiple leaf blocks so we can + // distinguish root-level from chunk-level provide behavior. + addLargeFilestoreFile := func(t *testing.T, publisher *harness.Node, addArgs ...string) (cidRoot, cidChunk string) { + t.Helper() + filePath := filepath.Join(publisher.Dir, "filestore-"+strconv.FormatInt(time.Now().UnixNano(), 10)+".bin") + require.NoError(t, os.WriteFile(filePath, random.Bytes(2*1024*1024), 0o644)) + + args := append([]string{"add", "-q", "--nocopy"}, addArgs...) + args = append(args, filePath) + cidRoot = strings.TrimSpace(publisher.IPFS(args...).Stdout.String()) + + dagOut := publisher.IPFS("dag", "get", cidRoot) + var dagNode struct { + Links []struct { + Hash map[string]string `json:"Hash"` + } `json:"Links"` + } + require.NoError(t, json.Unmarshal(dagOut.Stdout.Bytes(), &dagNode)) + require.Greater(t, len(dagNode.Links), 1, "filestore file should have multiple chunks") + cidChunk = dagNode.Links[0].Hash["/"] + require.NotEmpty(t, cidChunk) + return cidRoot, cidChunk + } + + t.Run("Filestore --nocopy with 'all' strategy provides every block", func(t *testing.T) { t.Parallel() nodes := initNodes(t, 2, func(n *harness.Node) { - n.SetIPFSConfig("Provide.Strategy", "pinned") + n.SetIPFSConfig("Experimental.FilestoreEnabled", true) + n.SetIPFSConfig("Provide.Strategy", "all") + n.SetIPFSConfig("Import.UnixFSChunker", "size-1048576") // 1 MiB chunks }) defer nodes.StopDaemons() + publisher, peers := nodes[0], nodes[1:] - // Add a non-pinned CID (should not be provided) - cid := nodes[0].IPFSAddStr("pinned strategy", "--pin=false") - expectNoProviders(t, cid, nodes[1:]...) 
+ // Positive control: with the default 'all' strategy the + // filestore Put path provides every block as it is written, + // including non-root chunks. + cidRoot, cidChunk := addLargeFilestoreFile(t, publisher) - // Pin the CID (should now be provided) - nodes[0].IPFS("pin", "add", cid) - expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) + pid := publisher.PeerID().String() + expectProviders(t, cidRoot, pid, peers...) + expectProviders(t, cidChunk, pid, peers...) }) - t.Run("Provide with 'pinned+mfs' strategy", func(t *testing.T) { + t.Run("Filestore --nocopy with selective strategy skips write-time provide", func(t *testing.T) { t.Parallel() nodes := initNodes(t, 2, func(n *harness.Node) { - n.SetIPFSConfig("Provide.Strategy", "pinned+mfs") + n.SetIPFSConfig("Experimental.FilestoreEnabled", true) + n.SetIPFSConfig("Provide.Strategy", "pinned") + n.SetIPFSConfig("Import.UnixFSChunker", "size-1048576") // 1 MiB chunks }) defer nodes.StopDaemons() + publisher, peers := nodes[0], nodes[1:] + + // With a selective strategy the filestore must not eagerly + // announce blocks at write time. --pin=false skips the pin + // (so fast-provide-root has nothing to do) and + // --fast-provide-root=false disables it explicitly, isolating + // the assertion to the filestore's internal provide path. + cidRoot, cidChunk := addLargeFilestoreFile(t, publisher, + "--pin=false", "--fast-provide-root=false") + + expectNoProviders(t, cidRoot, peers...) + expectNoProviders(t, cidChunk, peers...) 
+ }) - // Add a pinned CID (should be provided) - cidPinned := nodes[0].IPFSAddStr("pinned content") - cidUnpinned := nodes[0].IPFSAddStr("unpinned content", "--pin=false") - cidMFS := nodes[0].IPFSAddStr("mfs content", "--pin=false") - nodes[0].IPFS("files", "cp", "/ipfs/"+cidMFS, "/myfile") + t.Run("Filestore --nocopy + selective strategy + --fast-provide-dag walks DAG", func(t *testing.T) { + t.Parallel() - n0pid := nodes[0].PeerID().String() - expectProviders(t, cidPinned, n0pid, nodes[1:]...) - expectNoProviders(t, cidUnpinned, nodes[1:]...) - expectProviders(t, cidMFS, n0pid, nodes[1:]...) + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Experimental.FilestoreEnabled", true) + n.SetIPFSConfig("Provide.Strategy", "pinned") + n.SetIPFSConfig("Import.UnixFSChunker", "size-1048576") // 1 MiB chunks + }) + defer nodes.StopDaemons() + publisher, peers := nodes[0], nodes[1:] + + // The selective-strategy gate skips the filestore's write-time + // provide, but the post-add ExecuteFastProvideDAG walk reads + // blocks through the wrapping blockstore (which transparently + // serves filestore-backed content) and announces each block, + // honoring the active strategy. This is the integration test + // behind the changelog claim that filestore content now plays + // well with the fast-provide-dag flag. + cidRoot, cidChunk := addLargeFilestoreFile(t, publisher, + "--fast-provide-dag", "--fast-provide-wait") + + pid := publisher.PeerID().String() + expectProviders(t, cidRoot, pid, peers...) + expectProviders(t, cidChunk, pid, peers...) }) t.Run("Provide with 'roots' strategy", func(t *testing.T) { @@ -324,13 +576,17 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) { n.SetIPFSConfig("Provide.Strategy", "roots") }) defer nodes.StopDaemons() + publisher := nodes[0] - // Add a root CID (should be provided) - cidRoot := nodes[0].IPFSAddStr("roots strategy", "-w", "-Q") - // the same without wrapping should give us a child node. 
- cidChild := nodes[0].IPFSAddStr("root strategy", "--pin=false") + // Add with -w: the wrapper directory is the recursive pin root, + // the file inside is a child block of that pin (not a root). + // Use --only-hash first to learn the child CID without providing. + data := random.Bytes(1000) + cidChild := publisher.IPFSAdd(bytes.NewReader(data), "-Q", "--only-hash") + cidRoot := publisher.IPFSAdd(bytes.NewReader(data), "-Q", "-w") - expectProviders(t, cidRoot, nodes[0].PeerID().String(), nodes[1:]...) + // 'roots' strategy provides only pin roots, not child blocks. + expectProviders(t, cidRoot, publisher.PeerID().String(), nodes[1:]...) expectNoProviders(t, cidChild, nodes[1:]...) }) @@ -341,19 +597,66 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) { n.SetIPFSConfig("Provide.Strategy", "mfs") }) defer nodes.StopDaemons() + publisher := nodes[0] - // Add a file to MFS (should be provided) - data := random.Bytes(1000) - cid := nodes[0].IPFSAdd(bytes.NewReader(data), "-Q") + // 'mfs' only provides content in MFS. Pinned content outside + // MFS should NOT be provided (mfs excludes pinned by default). + cidPinned := publisher.IPFSAddStr(uniq("pinned but not mfs")) + expectNoProviders(t, cidPinned, nodes[1:]...) - // not yet in MFS - expectNoProviders(t, cid, nodes[1:]...) + // Add to MFS (should be provided) + data := random.Bytes(1000) + cidMFS := publisher.IPFSAdd(bytes.NewReader(data), "-Q", "--pin=false") + publisher.IPFS("files", "cp", "/ipfs/"+cidMFS, "/myfile") + expectProviders(t, cidMFS, publisher.PeerID().String(), nodes[1:]...) - nodes[0].IPFS("files", "cp", "/ipfs/"+cid, "/myfile") - expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) + // Pinned CID still not provided (mfs strategy ignores pins) + expectNoProviders(t, cidPinned, nodes[1:]...) }) - if reprovide { + // Reprovide tests: add content offline, start daemon, wait for reprovide. 
+ // + // Each test waits for TWO reprovide cycles to confirm the schedule + // works repeatedly, not just on the initial bootstrap. The second + // cycle also catches bugs where state isn't persisted across cycles. + // + // Legacy: `routing reprovide` blocks until the reprovide cycle finishes, + // so we call it and check results immediately after. + // + // Sweep: no manual trigger exists. Instead, we set + // Provide.DHT.Interval=30s on the importing node and poll + // `provide stat` until the cycle completes. + + // verifyReprovide waits for two reprovide cycles and asserts which + // CIDs are/aren't findable after each. minCIDs is the expected + // number of provided CIDs per cycle. + verifyReprovide := func( + t *testing.T, + publisher *harness.Node, + queriers harness.Nodes, + minCIDs int64, + provided []string, + notProvided []string, + ) { + t.Helper() + pid := publisher.PeerID().String() + check := func() { + for _, c := range provided { + expectProviders(t, c, pid, queriers...) + } + for _, c := range notProvided { + expectNoProviders(t, c, queriers...) + } + } + + after1 := awaitReprovide(t, publisher, minCIDs) + check() + // Second cycle: confirms the schedule runs repeatedly. + awaitReprovide(t, publisher, after1+minCIDs) + check() + } + + { t.Run("Reprovides with 'all' strategy when strategy is '' (empty)", func(t *testing.T) { t.Parallel() @@ -361,16 +664,19 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) { nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) { n.SetIPFSConfig("Provide.Strategy", "") }) + publisher := nodes[0] + if sweep { + publisher.SetIPFSConfig("Provide.DHT.Interval", "30s") + } - cid := nodes[0].IPFSAddStr(time.Now().String()) + cid := publisher.IPFSAddStr(time.Now().String()) nodes = nodes.StartDaemons().Connect() defer nodes.StopDaemons() - expectNoProviders(t, cid, nodes[1:]...) 
+ peers := nodes[1:] - nodes[0].IPFS("routing", "reprovide") - - expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) + verifyReprovide(t, publisher, peers, 1, // 1 block added + []string{cid}, nil) }) t.Run("Reprovides with 'all' strategy", func(t *testing.T) { @@ -379,16 +685,19 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) { nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) { n.SetIPFSConfig("Provide.Strategy", "all") }) + publisher := nodes[0] + if sweep { + publisher.SetIPFSConfig("Provide.DHT.Interval", "30s") + } - cid := nodes[0].IPFSAddStr(time.Now().String()) + cid := publisher.IPFSAddStr(time.Now().String()) nodes = nodes.StartDaemons().Connect() defer nodes.StopDaemons() - expectNoProviders(t, cid, nodes[1:]...) + peers := nodes[1:] - nodes[0].IPFS("routing", "reprovide") - - expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) + verifyReprovide(t, publisher, peers, 1, // 1 block added + []string{cid}, nil) }) t.Run("Reprovides with 'pinned' strategy", func(t *testing.T) { @@ -400,62 +709,54 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) { nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) { n.SetIPFSConfig("Provide.Strategy", "pinned") }) + publisher := nodes[0] + if sweep { + publisher.SetIPFSConfig("Provide.DHT.Interval", "30s") + } - // Add a pin while offline so it cannot be provided - cidBarDir := nodes[0].IPFSAdd(bytes.NewReader(bar), "-Q", "-w") + // Add a pin while offline + cidBarDir := publisher.IPFSAdd(bytes.NewReader(bar), "-Q", "-w") nodes = nodes.StartDaemons().Connect() defer nodes.StopDaemons() + peers := nodes[1:] - // Add content without pinning while daemon line - cidFoo := nodes[0].IPFSAdd(bytes.NewReader(foo), "--pin=false") - cidBar := nodes[0].IPFSAdd(bytes.NewReader(bar), "--pin=false") + // Add content without pinning while daemon is online + cidFoo := publisher.IPFSAdd(bytes.NewReader(foo), "--pin=false") + cidBar := 
publisher.IPFSAdd(bytes.NewReader(bar), "--pin=false") - // Nothing should have been provided. The pin was offline, and - // the others should not be provided per the strategy. - expectNoProviders(t, cidFoo, nodes[1:]...) - expectNoProviders(t, cidBar, nodes[1:]...) - expectNoProviders(t, cidBarDir, nodes[1:]...) - - nodes[0].IPFS("routing", "reprovide") - - // cidFoo is not pinned so should not be provided. - expectNoProviders(t, cidFoo, nodes[1:]...) - // cidBar gets provided by being a child from cidBarDir even though we added with pin=false. - expectProviders(t, cidBar, nodes[0].PeerID().String(), nodes[1:]...) - expectProviders(t, cidBarDir, nodes[0].PeerID().String(), nodes[1:]...) + verifyReprovide(t, publisher, peers, 2, // cidBar + cidBarDir (bar is child of the wrapped dir pin) + []string{cidBar, cidBarDir}, + []string{cidFoo}) // cidFoo not pinned }) t.Run("Reprovides with 'roots' strategy", func(t *testing.T) { t.Parallel() - foo := random.Bytes(1000) bar := random.Bytes(1000) nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) { n.SetIPFSConfig("Provide.Strategy", "roots") }) - n0pid := nodes[0].PeerID().String() + publisher := nodes[0] + if sweep { + publisher.SetIPFSConfig("Provide.DHT.Interval", "30s") + } - // Add a pin. Only root should get pinned but not provided - // because node not started - cidBarDir := nodes[0].IPFSAdd(bytes.NewReader(bar), "-Q", "-w") + // Compute the child CID without storing anything (safe + // offline, daemon not started yet). + cidChild := publisher.IPFSAdd(bytes.NewReader(bar), "-Q", "--only-hash") + // Add with -w: pins the wrapper directory as root. The file + // inside is a child block of that pin, not a root. 
+ cidRoot := publisher.IPFSAdd(bytes.NewReader(bar), "-Q", "-w") nodes = nodes.StartDaemons().Connect() defer nodes.StopDaemons() + peers := nodes[1:] - cidFoo := nodes[0].IPFSAdd(bytes.NewReader(foo)) - cidBar := nodes[0].IPFSAdd(bytes.NewReader(bar), "--pin=false") - - // cidFoo will get provided per the strategy but cidBar will not. - expectProviders(t, cidFoo, n0pid, nodes[1:]...) - expectNoProviders(t, cidBar, nodes[1:]...) - - nodes[0].IPFS("routing", "reprovide") - - expectProviders(t, cidFoo, n0pid, nodes[1:]...) - expectNoProviders(t, cidBar, nodes[1:]...) - expectProviders(t, cidBarDir, n0pid, nodes[1:]...) + verifyReprovide(t, publisher, peers, 1, // cidRoot (only pin root) + []string{cidRoot}, + []string{cidChild}) // child of pin, not a root }) t.Run("Reprovides with 'mfs' strategy", func(t *testing.T) { @@ -466,22 +767,24 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) { nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) { n.SetIPFSConfig("Provide.Strategy", "mfs") }) - n0pid := nodes[0].PeerID().String() + publisher := nodes[0] + if sweep { + publisher.SetIPFSConfig("Provide.DHT.Interval", "30s") + } - // add something and lets put it in MFS - cidBar := nodes[0].IPFSAdd(bytes.NewReader(bar), "--pin=false", "-Q") - nodes[0].IPFS("files", "cp", "/ipfs/"+cidBar, "/myfile") + // Add to MFS (should be provided) + cidMFS := publisher.IPFSAdd(bytes.NewReader(bar), "--pin=false", "-Q") + publisher.IPFS("files", "cp", "/ipfs/"+cidMFS, "/myfile") + // Pin something NOT in MFS (should NOT be provided) + cidPinned := publisher.IPFSAddStr(uniq("pinned but not mfs")) nodes = nodes.StartDaemons().Connect() defer nodes.StopDaemons() + peers := nodes[1:] - // cidBar is in MFS but not provided - expectNoProviders(t, cidBar, nodes[1:]...) - - nodes[0].IPFS("routing", "reprovide") - - // And now is provided - expectProviders(t, cidBar, n0pid, nodes[1:]...) 
+ verifyReprovide(t, publisher, peers, 1, // cidMFS only + []string{cidMFS}, + []string{cidPinned}) // mfs strategy ignores pinned content outside MFS }) t.Run("Reprovides with 'pinned+mfs' strategy", func(t *testing.T) { @@ -490,28 +793,79 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) { nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) { n.SetIPFSConfig("Provide.Strategy", "pinned+mfs") }) - n0pid := nodes[0].PeerID().String() + publisher := nodes[0] + if sweep { + publisher.SetIPFSConfig("Provide.DHT.Interval", "30s") + } // Add a pinned CID (should be provided) - cidPinned := nodes[0].IPFSAddStr("pinned content", "--pin=true") + cidPinned := publisher.IPFSAddStr(uniq("pinned content"), "--pin=true") // Add a CID to MFS (should be provided) - cidMFS := nodes[0].IPFSAddStr("mfs content") - nodes[0].IPFS("files", "cp", "/ipfs/"+cidMFS, "/myfile") + cidMFS := publisher.IPFSAddStr(uniq("mfs content")) + publisher.IPFS("files", "cp", "/ipfs/"+cidMFS, "/myfile") // Add a CID that is neither pinned nor in MFS (should not be provided) - cidNeither := nodes[0].IPFSAddStr("neither content", "--pin=false") + cidNeither := publisher.IPFSAddStr(uniq("neither content"), "--pin=false") + + nodes = nodes.StartDaemons().Connect() + defer nodes.StopDaemons() + peers := nodes[1:] + + verifyReprovide(t, publisher, peers, 2, // cidPinned + cidMFS + []string{cidPinned, cidMFS}, + []string{cidNeither}) // neither pinned nor in MFS + }) + + t.Run("Reprovides with 'pinned+mfs+unique' strategy", func(t *testing.T) { + t.Parallel() + + nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.Strategy", "pinned+mfs+unique") + n.SetIPFSConfig("Import.UnixFSChunker", "size-1048576") // 1 MiB chunks + }) + publisher := nodes[0] + if sweep { + publisher.SetIPFSConfig("Provide.DHT.Interval", "30s") + } + + // Build a directory DAG with a multi-chunk file in MFS, then pin it. 
+ cidRoot, cidSubdir, cidFile, cidChunk := addLargeFileInSubdir(t, publisher) + publisher.IPFS("pin", "add", cidRoot) + cidUnpinned := publisher.IPFSAddStr(uniq("unpinned content"), "--pin=false") nodes = nodes.StartDaemons().Connect() defer nodes.StopDaemons() + peers := nodes[1:] + + // +unique provides all blocks in pinned DAGs (same as pinned+mfs) + verifyReprovide(t, publisher, peers, 4, // root + subdir + file + chunks + []string{cidRoot, cidSubdir, cidFile, cidChunk}, + []string{cidUnpinned}) + }) - // Trigger reprovide - nodes[0].IPFS("routing", "reprovide") + t.Run("Reprovides with 'pinned+mfs+entities' strategy", func(t *testing.T) { + t.Parallel() + + nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.Strategy", "pinned+mfs+entities") + n.SetIPFSConfig("Import.UnixFSChunker", "size-1048576") // 1 MiB chunks + }) + publisher := nodes[0] + if sweep { + publisher.SetIPFSConfig("Provide.DHT.Interval", "30s") + } + + // Build a directory DAG with a multi-chunk file in MFS, then pin it. + cidRoot, cidSubdir, cidFile, cidChunk := addLargeFileInSubdir(t, publisher) + publisher.IPFS("pin", "add", cidRoot) + + nodes = nodes.StartDaemons().Connect() + defer nodes.StopDaemons() + peers := nodes[1:] - // Check that pinned CID is provided - expectProviders(t, cidPinned, n0pid, nodes[1:]...) - // Check that MFS CID is provided - expectProviders(t, cidMFS, n0pid, nodes[1:]...) - // Check that neither CID is not provided - expectNoProviders(t, cidNeither, nodes[1:]...) 
+ // Entity roots: directories and file root (not chunks) + verifyReprovide(t, publisher, peers, 3, // root + subdir + file (not chunks) + []string{cidRoot, cidSubdir, cidFile}, + []string{cidChunk}) // chunks skipped by +entities }) } @@ -712,12 +1066,22 @@ type provideStatJSON struct { Schedule struct { NextReprovidePrefix string `json:"next_reprovide_prefix"` } `json:"schedule"` + Operations struct { + Ongoing struct { + KeyReprovides int `json:"key_reprovides"` + } `json:"ongoing"` + Past struct { + KeysProvided int64 `json:"keys_provided"` + } `json:"past"` + } `json:"operations"` + Queues struct { + PendingKeyProvides int64 `json:"pending_key_provides"` + } `json:"queues"` } `json:"Sweep"` } // parseProvideStatJSON extracts timing and schedule information from // the JSON output of 'ipfs provide stat --enc=json'. -// Note: prefix is unused in current tests but kept for potential future use. func parseProvideStatJSON(output string) (offset time.Duration, prefix string, err error) { var stat provideStatJSON if err := json.Unmarshal([]byte(output), &stat); err != nil { @@ -728,43 +1092,304 @@ func parseProvideStatJSON(output string) (offset time.Duration, prefix string, e return offset, prefix, nil } +// waitForSweepReprovide polls `provide stat --enc=json` until the +// sweep provider has provided at least minCIDs and no work is pending. +// Pass 0 for minCIDs to just wait for any provide activity to finish. +// Returns the total CIDs provided so far (for use as minCIDs in a +// subsequent call to wait for the next cycle). +// The importing node must have a short Provide.DHT.Interval so the +// reprovide cycle completes within the timeout. 
+func waitForSweepReprovide(t *testing.T, n *harness.Node, timeout time.Duration, minCIDs int64) int64 { + t.Helper() + if minCIDs == 0 { + minCIDs = 1 + } + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) { + res := n.RunIPFS("provide", "stat", "--enc=json") + if res.ExitCode() == 0 { + var stat provideStatJSON + if err := json.Unmarshal(res.Stdout.Bytes(), &stat); err == nil { + s := stat.Sweep + if s.Operations.Past.KeysProvided >= minCIDs && + s.Queues.PendingKeyProvides == 0 && + s.Operations.Ongoing.KeyReprovides == 0 { + return s.Operations.Past.KeysProvided + } + } + } + time.Sleep(500 * time.Millisecond) + } + t.Fatalf("sweep reprovide: expected at least %d CIDs provided within %s", minCIDs, timeout) + return 0 +} + func TestProvider(t *testing.T) { t.Parallel() variants := []struct { - name string - reprovide bool - apply cfgApplier + name string + sweep bool + apply cfgApplier + awaitReprovide awaitReprovideFunc }{ { - name: "LegacyProvider", - reprovide: true, + name: "LegacyProvider", + sweep: false, apply: func(n *harness.Node) { n.SetIPFSConfig("Provide.DHT.SweepEnabled", false) }, + // `routing reprovide` blocks until the cycle finishes. + // minCIDs is ignored (legacy has no stat counter). + awaitReprovide: func(t *testing.T, n *harness.Node, minCIDs int64) int64 { + n.IPFS("routing", "reprovide") + return minCIDs + }, }, { - name: "SweepingProvider", - reprovide: false, + name: "SweepingProvider", + sweep: true, apply: func(n *harness.Node) { n.SetIPFSConfig("Provide.DHT.SweepEnabled", true) }, + // No manual trigger exists for sweep. Poll `provide stat` + // until the reprovide cycle completes. + awaitReprovide: func(t *testing.T, n *harness.Node, minCIDs int64) int64 { + // 90s accounts for provider bootstrap time (connecting + // to ephemeral peers, measuring prefix length) before + // the 30s reprovide cycle starts. On CI with parallel + // tests, bootstrap can take 20-30s. 
+ return waitForSweepReprovide(t, n, 90*time.Second, minCIDs) + }, }, } for _, v := range variants { t.Run(v.name, func(t *testing.T) { // t.Parallel() - runProviderSuite(t, v.reprovide, v.apply) + runProviderSuite(t, v.sweep, v.apply, v.awaitReprovide) // Resume tests only apply to SweepingProvider - if v.name == "SweepingProvider" { + if v.sweep { runResumeTests(t, v.apply) } }) } } +// TestProviderUniqueDedupLogging verifies that the +unique bloom filter +// deduplication produces a "skippedBranches" log with a value > 0 when +// two pins share content. Tests both the fast-provide-dag path (immediate +// provide on pin add) and the reprovide cycle path. +func TestProviderUniqueDedupLogging(t *testing.T) { + t.Parallel() + + // Shared data that both pins will reference. Two pins containing + // the same file block give the bloom something to dedup. + sharedData := random.Bytes(10 * 1024) // 10 KiB, single block + + t.Run("fast-provide-dag dedup across pins in single call", func(t *testing.T) { + t.Parallel() + + h := harness.NewT(t) + node := h.NewNode().Init() + node.SetIPFSConfig("Provide.Strategy", "pinned+unique") + node.SetIPFSConfig("Provide.DHT.SweepEnabled", true) + node.SetIPFSConfig("Import.UnixFSChunker", "size-5120") // 5 KiB chunks + h.BootstrapWithStubDHT(harness.Nodes{node}) + + node.StartDaemonWithReq(harness.RunRequest{ + CmdOpts: []harness.CmdOpt{ + harness.RunWithEnv(map[string]string{ + // dagwalker: bloom creation log + // core/commands/cmdenv: fast-provide-dag finished log + "GOLOG_LOG_LEVEL": "error,dagwalker=info,core/commands/cmdenv=info", + }), + }, + }, "") + defer node.StopDaemon() + + // 10 KiB file with 5 KiB chunks = 1 file root + 2 chunks = 3 blocks. + // Two dirs each containing the file under different names: + // dirA/fileA → same 3 blocks + // dirB/fileB → same 3 blocks + // Pinning both in a single `pin add` shares one bloom tracker. + // Walking dirA: dirA + file root + chunk1 + chunk2 = 4 provided. 
+ // Walking dirB: dirB + file root (bloom hit, skip subtree) = 1 provided, 1 skipped. + // Total: 5 provided, 1 skipped branch (file root in dirB; its + // 2 chunks are never visited because the parent was skipped). + cidFile := node.IPFSAdd(bytes.NewReader(sharedData), "-Q", "--pin=false") + node.IPFS("files", "mkdir", "-p", "/dirA") + node.IPFS("files", "cp", "/ipfs/"+cidFile, "/dirA/fileA") + cidDirA := node.IPFS("files", "stat", "--hash", "/dirA").Stdout.Trimmed() + node.IPFS("files", "mkdir", "-p", "/dirB") + node.IPFS("files", "cp", "/ipfs/"+cidFile, "/dirB/fileB") + cidDirB := node.IPFS("files", "stat", "--hash", "/dirB").Stdout.Trimmed() + require.NotEqual(t, cidDirA, cidDirB, "dirs must differ to test dedup") + // Single pin add with both CIDs shares one bloom. + node.IPFS("pin", "add", "--fast-provide-dag", "--fast-provide-wait", cidDirA, cidDirB) + + daemonLog := node.Daemon.Stderr.String() + require.Contains(t, daemonLog, "bloom tracker created") + require.NotContains(t, daemonLog, "bloom tracker autoscaled") + require.Contains(t, daemonLog, `"providedCIDs": 5`) + require.Contains(t, daemonLog, `"skippedBranches": 1`) + }) + + t.Run("reprovide cycle dedup across pins", func(t *testing.T) { + t.Parallel() + + h := harness.NewT(t) + nodes := h.NewNodes(2).Init() + for _, n := range nodes { + n.SetIPFSConfig("Provide.Strategy", "pinned+unique") + n.SetIPFSConfig("Provide.DHT.SweepEnabled", true) + n.SetIPFSConfig("Import.UnixFSChunker", "size-5120") // 5 KiB chunks + } + publisher := nodes[0] + publisher.SetIPFSConfig("Provide.DHT.Interval", "30s") + h.BootstrapWithStubDHT(nodes) + + // Same file structure as fast-provide-dag test above. + // The reprovide cycle walks all recursive pins: + // pin dirA: dirA + file root + chunk1 + chunk2 = 4 provided + // pin empty MFS root (always present): 1 provided + // pin dirB: dirB + file root (bloom hit, skip subtree) = 1 provided, 1 skipped + // Total: 6 provided, 1 skipped branch. 
+ cidFile := publisher.IPFSAdd(bytes.NewReader(sharedData), "-Q", "--pin=false") + publisher.IPFS("files", "mkdir", "-p", "/dirA") + publisher.IPFS("files", "cp", "/ipfs/"+cidFile, "/dirA/fileA") + cidDirA := publisher.IPFS("files", "stat", "--hash", "/dirA").Stdout.Trimmed() + publisher.IPFS("pin", "add", cidDirA) + publisher.IPFS("files", "mkdir", "-p", "/dirB") + publisher.IPFS("files", "cp", "/ipfs/"+cidFile, "/dirB/fileB") + cidDirB := publisher.IPFS("files", "stat", "--hash", "/dirB").Stdout.Trimmed() + require.NotEqual(t, cidDirA, cidDirB, "dirs must differ to test dedup") + publisher.IPFS("pin", "add", cidDirB) + + nodes[0].StartDaemonWithReq(harness.RunRequest{ + CmdOpts: []harness.CmdOpt{ + harness.RunWithEnv(map[string]string{ + "GOLOG_LOG_LEVEL": "error,dagwalker=info,core:constructor=info", + }), + }, + }, "") + nodes[1].StartDaemon() + defer nodes.StopDaemons() + nodes.Connect() + + waitForSweepReprovide(t, publisher, 90*time.Second, 6) + + daemonLog := publisher.Daemon.Stderr.String() + require.Contains(t, daemonLog, "bloom tracker created") + require.NotContains(t, daemonLog, "bloom tracker autoscaled") + require.Contains(t, daemonLog, `"providedCIDs": 6`) + require.Contains(t, daemonLog, `"skippedBranches": 1`) + }) +} + +// TestProviderFastProvideDAGAsyncSurvives verifies that +// --fast-provide-dag without --fast-provide-wait runs a background +// DAG walk that outlives the command handler and publishes every +// block of the newly added DAG to the routing system. +// +// The async walk runs in a goroutine parented on the IpfsNode +// lifetime context (not req.Context), so it keeps running after +// `ipfs add` returns and is only cancelled on daemon shutdown. +// +// Provide.DHT.Interval is set high so the scheduled reprovide +// cycle cannot fire during the test window. That makes the async +// walk the only path that can publish non-root block CIDs. 
+func TestProviderFastProvideDAGAsyncSurvives(t *testing.T) { + t.Parallel() + + h := harness.NewT(t) + nodes := h.NewNodes(2).Init() + for _, n := range nodes { + n.SetIPFSConfig("Provide.Strategy", "pinned") + n.SetIPFSConfig("Provide.DHT.SweepEnabled", true) + // Small chunks so a modest file produces many leaf blocks. + n.SetIPFSConfig("Import.UnixFSChunker", "size-1024") + } + publisher, peers := nodes[0], nodes[1:] + publisher.SetIPFSConfig("Provide.DHT.Interval", "1h") + h.BootstrapWithStubDHT(nodes) + + publisher.StartDaemonWithReq(harness.RunRequest{ + CmdOpts: []harness.CmdOpt{ + harness.RunWithEnv(map[string]string{ + "GOLOG_LOG_LEVEL": "error,core/commands/cmdenv=info", + }), + }, + }, "") + nodes[1].StartDaemon() + defer nodes.StopDaemons() + nodes.Connect() + + // 16 KiB + 1 KiB chunks yields a file root plus many leaf + // blocks, so the providedCIDs count after the walk is + // unambiguous. + data := random.Bytes(16 * 1024) + cidFile := publisher.IPFSAdd(bytes.NewReader(data), "-Q", + "--pin=true", + "--fast-provide-dag=true", + // --fast-provide-wait deliberately omitted: the walk + // runs in the background after `ipfs add` returns. + ) + + // Pull a chunk CID out of the file DAG. Chunks are not pin + // roots, so fast-provide-root does not touch them; only the + // DAG walk can announce them. + dagOut := publisher.IPFS("dag", "get", cidFile) + var dagNode struct { + Links []struct { + Hash map[string]string `json:"Hash"` + } `json:"Links"` + } + require.NoError(t, json.Unmarshal(dagOut.Stdout.Bytes(), &dagNode)) + require.Greater(t, len(dagNode.Links), 1, "file should have multiple chunks") + cidChunk := dagNode.Links[0].Hash["/"] + require.NotEmpty(t, cidChunk) + + // The async walk logs "fast-provide-dag: finished" with a + // providedCIDs count on completion. A full walk of this file + // visits the root plus every leaf chunk, so the count is much + // larger than 2. 
+ providedRe := regexp.MustCompile(`"providedCIDs": (\d+)`) + var providedCount int + require.Eventually(t, func() bool { + m := providedRe.FindStringSubmatch(publisher.Daemon.Stderr.String()) + if len(m) != 2 { + return false + } + n, err := strconv.Atoi(m[1]) + if err != nil { + return false + } + providedCount = n + return true + }, 30*time.Second, 200*time.Millisecond, "async fast-provide-dag walk did not log 'finished'") + + require.Greater(t, providedCount, 2, + "providedCIDs=%d is too small for a full walk of the file DAG", providedCount) + + // End-to-end: the peer can find the publisher as a provider + // for a chunk CID, which only the async walk could have + // announced within the test window. + pid := publisher.PeerID().String() + var found bool + for _, peer := range peers { + for i := time.Duration(0); i*timeStep < timeout; i++ { + res := peer.IPFS("routing", "findprovs", "-n=1", cidChunk) + if res.Stdout.Trimmed() == pid { + found = true + break + } + } + } + require.True(t, found, "chunk %s not announced by the async walk", cidChunk) +} + // TestHTTPOnlyProviderWithSweepEnabled tests that provider records are correctly // sent to HTTP routers when Routing.Type="custom" with only HTTP routers configured, // even when Provide.DHT.SweepEnabled=true (the default since v0.39). @@ -842,3 +1467,121 @@ func TestHTTPOnlyProviderWithSweepEnabled(t *testing.T) { assert.Contains(t, statRes.Stdout.String(), "TotalReprovides:", "should show legacy provider stats") } + +// TestProviderKeystoreDatastoreCompaction verifies that the SweepingProvider's +// keystore uses a datastore factory that creates separate physical datastores +// and reclaims disk space by deleting old datastores after each reset cycle. +// +// The keystore uses two alternating namespaces ("0" and "1") plus a "meta" +// namespace. The lifecycle is: +// 1. First start: namespace "0" is created as the initial active datastore +// 2. 
First reset (keystore sync at startup): "1" is created, data is written, +// namespaces swap, "0" is destroyed from disk via os.RemoveAll +// 3. Restart: "1" and "meta" survive on disk +// 4. Second reset: "0" is recreated, namespaces swap, "1" is destroyed +func TestProviderKeystoreDatastorePurge(t *testing.T) { + t.Parallel() + + h := harness.NewT(t) + node := h.NewNode().Init() + node.SetIPFSConfig("Provide.DHT.SweepEnabled", true) + node.SetIPFSConfig("Provide.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{}) + + // Add content offline so the keystore has something to sync on startup. + for i := range 5 { + node.IPFSAddStr(fmt.Sprintf("keystore-compaction-test-%d", i)) + } + + keystoreBase := filepath.Join(node.Dir, "provider-keystore") + ns0 := filepath.Join(keystoreBase, "0") + ns1 := filepath.Join(keystoreBase, "1") + + // Directory should not exist before starting the daemon. + _, err := os.Stat(keystoreBase) + require.True(t, os.IsNotExist(err), "provider-keystore should not exist before daemon start") + + // --- First start: triggers keystore sync (ResetCids) --- + // Init creates "0", then reset swaps to "1" and destroys "0". + node.StartDaemon() + + require.Eventually(t, func() bool { + return dirExists(ns1) && !dirExists(ns0) + }, 30*time.Second, 200*time.Millisecond, + "after first reset: ns1 should exist, ns0 should be destroyed") + + // --- Restart: triggers a second keystore sync (ResetCids) --- + // Reset swaps back to "0" and destroys "1". + node.StopDaemon() + + // Between restarts: ns1 survives on disk, ns0 does not. 
+ assert.True(t, dirExists(ns1), "ns1 should survive shutdown") + assert.False(t, dirExists(ns0), "ns0 should not reappear between restarts") + + node.StartDaemon() + + require.Eventually(t, func() bool { + return dirExists(ns0) && !dirExists(ns1) + }, 30*time.Second, 200*time.Millisecond, + "after second reset: ns0 should exist, ns1 should be destroyed") + + node.StopDaemon() +} + +// TestProviderKeystoreMigrationPurge verifies that orphaned keystore data +// left in the shared repo datastore by older Kubo versions is purged on +// the first sweep-enabled daemon start. The migration is triggered by the +// absence of the /provider-keystore/ directory. +func TestProviderKeystoreMigrationPurge(t *testing.T) { + t.Parallel() + + h := harness.NewT(t) + node := h.NewNode().Init() + node.SetIPFSConfig("Provide.DHT.SweepEnabled", true) + node.SetIPFSConfig("Provide.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{}) + + keystoreBase := filepath.Join(node.Dir, "provider-keystore") + + // Pre-seed orphaned keystore data into the shared datastore, simulating + // the layout produced by older Kubo that stored keystore entries inline. + const numOrphans = 10 + for i := range numOrphans { + node.DatastorePut( + fmt.Sprintf("/provider/keystore/%d/fake-key-%d", i%2, i), + fmt.Sprintf("orphan-%d", i), + ) + } + + // The orphaned keys should be visible via diag datastore. + count := node.DatastoreCount("/provider/keystore/") + require.Equal(t, int64(numOrphans), count, "orphaned keys should be present before migration") + + // The provider-keystore directory must not exist yet (its absence + // triggers the migration). + require.False(t, dirExists(keystoreBase), + "provider-keystore/ should not exist before first sweep-enabled start") + + // Start the daemon: this triggers the one-time migration purge. + node.StartDaemon() + node.StopDaemon() + + // After migration the seeded orphaned keys should be gone from the + // shared datastore. 
The diag datastore count command mounts the + // separate provider-keystore datastores, so we check for the specific + // fake keys we seeded to confirm they were purged. + for i := range numOrphans { + key := fmt.Sprintf("/provider/keystore/%d/fake-key-%d", i%2, i) + assert.False(t, node.DatastoreHasKey(key), + "orphaned key %s should be purged after migration", key) + } + + // The provider-keystore directory should now exist. + assert.True(t, dirExists(keystoreBase), + "provider-keystore/ should exist after sweep-enabled daemon ran") +} + +func dirExists(path string) bool { + info, err := os.Stat(path) + return err == nil && info.IsDir() +} diff --git a/test/cli/update_test.go b/test/cli/update_test.go new file mode 100644 index 00000000000..02ce49b1dd1 --- /dev/null +++ b/test/cli/update_test.go @@ -0,0 +1,514 @@ +package cli + +import ( + "archive/tar" + "archive/zip" + "bytes" + "compress/gzip" + "crypto/sha512" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestUpdate exercises the built-in "ipfs update" command tree against +// the real GitHub Releases API. Network access is required. +// +// The node is created without Init or daemon, so install/revert error +// paths that don't depend on a running daemon can be tested. +func TestUpdate(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + node := h.NewNode() + + t.Run("help text describes the command", func(t *testing.T) { + t.Parallel() + res := node.IPFS("update", "--help") + assert.Contains(t, res.Stdout.String(), "Update Kubo to a different version") + }) + + // check and versions are read-only GitHub API queries. They must work + // regardless of daemon state, since users need to check for updates + // before deciding whether to stop the daemon and install. 
+ t.Run("check", func(t *testing.T) { + t.Parallel() + + t.Run("text output reports update availability", func(t *testing.T) { + t.Parallel() + res := node.IPFS("update", "check") + out := res.Stdout.String() + assert.True(t, + strings.Contains(out, "Update available") || strings.Contains(out, "Already up to date"), + "expected update status message, got: %s", out) + }) + + t.Run("json output includes version fields", func(t *testing.T) { + t.Parallel() + res := node.IPFS("update", "check", "--enc=json") + var result struct { + CurrentVersion string + LatestVersion string + UpdateAvailable bool + } + err := json.Unmarshal(res.Stdout.Bytes(), &result) + require.NoError(t, err, "invalid JSON: %s", res.Stdout.String()) + assert.NotEmpty(t, result.CurrentVersion, "must report current version") + assert.NotEmpty(t, result.LatestVersion, "must report latest version") + }) + }) + + t.Run("versions", func(t *testing.T) { + t.Parallel() + + t.Run("lists available versions", func(t *testing.T) { + t.Parallel() + res := node.IPFS("update", "versions") + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assert.Greater(t, len(lines), 0, "should list at least one version") + }) + + t.Run("respects --count flag", func(t *testing.T) { + t.Parallel() + res := node.IPFS("update", "versions", "--count=5") + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assert.LessOrEqual(t, len(lines), 5) + }) + + t.Run("json output includes current version and list", func(t *testing.T) { + t.Parallel() + res := node.IPFS("update", "versions", "--count=3", "--enc=json") + var result struct { + Current string + Versions []string + } + err := json.Unmarshal(res.Stdout.Bytes(), &result) + require.NoError(t, err, "invalid JSON: %s", res.Stdout.String()) + assert.NotEmpty(t, result.Current, "must report current version") + assert.NotEmpty(t, result.Versions, "must list at least one version") + }) + + t.Run("--pre includes prerelease versions", func(t 
*testing.T) { + t.Parallel() + res := node.IPFS("update", "versions", "--count=5", "--pre") + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assert.Greater(t, len(lines), 0, "should list at least one version") + }) + }) + + // install and revert mutate the binary on disk, so they have stricter + // preconditions. These tests verify the error paths. + t.Run("install rejects same version", func(t *testing.T) { + t.Parallel() + vRes := node.IPFS("version", "-n") + current := strings.TrimSpace(vRes.Stdout.String()) + + res := node.RunIPFS("update", "install", current) + assert.Error(t, res.Err) + assert.Contains(t, res.Stderr.String(), "already running version", + "should refuse to re-install the current version") + }) + + t.Run("revert fails when no backup exists", func(t *testing.T) { + t.Parallel() + res := node.RunIPFS("update", "revert") + assert.Error(t, res.Err) + assert.Contains(t, res.Stderr.String(), "no stashed binaries", + "should explain there is no previous version to restore") + }) +} + +// TestUpdateWhileDaemonRuns verifies that read-only update subcommands +// (check, versions) work while the IPFS daemon holds the repo lock. +// These commands only query the GitHub API and never touch the repo, +// so they must succeed regardless of daemon state. 
+func TestUpdateWhileDaemonRuns(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + t.Run("check succeeds with daemon running", func(t *testing.T) { + t.Parallel() + res := node.IPFS("update", "check") + out := res.Stdout.String() + assert.True(t, + strings.Contains(out, "Update available") || strings.Contains(out, "Already up to date"), + "check must work while daemon runs, got: %s", out) + }) + + t.Run("versions succeeds with daemon running", func(t *testing.T) { + t.Parallel() + res := node.IPFS("update", "versions", "--count=3") + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assert.Greater(t, len(lines), 0, + "versions must work while daemon runs") + }) +} + +// TestUpdateInstall exercises the full install flow end-to-end: +// API query, archive download, SHA-512 verification, tar.gz extraction, +// binary stash (backup), and atomic replace. +// +// A local mock HTTP server replaces GitHub so the test is fast, offline, +// and deterministic. The built ipfs binary is copied to a temp directory +// so the install replaces the copy, not the real build artifact. +// +// The env var TEST_KUBO_UPDATE_GITHUB_URL redirects the binary's GitHub +// API calls to the mock server. TEST_KUBO_VERSION makes the binary +// report a specific version so the "upgrade" to v0.99.0 is deterministic. +func TestUpdateInstall(t *testing.T) { + // Not t.Parallel(): this test writes a copy of the ipfs binary and + // then exec's it. Running in parallel with other tests exposes the + // ETXTBSY race where a concurrent fork() in another test goroutine + // inherits our still-open write fd, leaving the freshly written + // file "text file busy" for exec until the sibling child execs. + // Running sequentially guarantees no other goroutine is mid-fork + // while we're writing. + + // Build a fake binary to put inside the archive. 
After install, the + // file at tmpBinPath should contain exactly these bytes. + fakeBinary := []byte("#!/bin/sh\necho fake-ipfs-v0.99.0\n") + + // Archive entry path: extractBinaryFromArchive looks for "kubo/". + binName := "ipfs" + if runtime.GOOS == "windows" { + binName = "ipfs.exe" + } + var archive []byte + if runtime.GOOS == "windows" { + archive = buildTestZip(t, "kubo/"+binName, fakeBinary) + } else { + archive = buildTestTarGz(t, "kubo/"+binName, fakeBinary) + } + + // Compute SHA-512 of the archive for the .sha512 sidecar file. + sum := sha512.Sum512(archive) + + // Asset name must match what findReleaseAsset expects for the + // current OS/arch (e.g., kubo_v0.99.0_linux-amd64.tar.gz). + ext := "tar.gz" + if runtime.GOOS == "windows" { + ext = "zip" + } + assetName := fmt.Sprintf("kubo_v0.99.0_%s-%s.%s", runtime.GOOS, runtime.GOARCH, ext) + checksumBody := fmt.Sprintf("%x %s\n", sum[:], assetName) + + // Mock server: serves GitHub Releases API, archive, and .sha512 sidecar. + // srvURL is captured after the server starts, so the handler can build + // browser_download_url values pointing back to itself. 
+ var srvURL string + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + // githubReleaseByTag: GET /tags/v0.99.0 + case "/tags/v0.99.0": + rel := map[string]any{ + "tag_name": "v0.99.0", + "prerelease": false, + "assets": []map[string]any{{ + "name": assetName, + "browser_download_url": srvURL + "/download/" + assetName, + }}, + } + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(rel) + + // downloadAsset: GET /download/.tar.gz + case "/download/" + assetName: + _, _ = w.Write(archive) + + // downloadAndVerifySHA512: GET /download/.tar.gz.sha512 + case "/download/" + assetName + ".sha512": + _, _ = w.Write([]byte(checksumBody)) + + default: + http.NotFound(w, r) + } + })) + t.Cleanup(srv.Close) + srvURL = srv.URL + + // Copy the real built binary to a temp directory. The install command + // uses os.Executable() to find the binary to replace, so the subprocess + // will replace this copy instead of the real build artifact. + tmpBinDir := t.TempDir() + tmpBinPath := filepath.Join(tmpBinDir, binName) + copyBuiltBinary(t, tmpBinPath) + + // Create a harness that uses the temp binary copy. + h := harness.NewT(t, func(h *harness.Harness) { + h.IPFSBin = tmpBinPath + }) + node := h.NewNode() + + // Make the binary report v0.30.0 so the "upgrade" to v0.99.0 has a + // deterministic from-version. Point API calls at the mock server. + node.Runner.Env["TEST_KUBO_VERSION"] = "0.30.0" + node.Runner.Env["TEST_KUBO_UPDATE_GITHUB_URL"] = srvURL + + // Run: ipfs update install v0.99.0 + res := node.RunIPFS("update", "install", "v0.99.0") + require.NoError(t, res.Err, "install failed; stderr:\n%s", res.Stderr.String()) + + // Verify progress messages on stderr. 
+ stderr := res.Stderr.String() + assert.Contains(t, stderr, "Downloading Kubo 0.99.0", + "should show download progress") + assert.Contains(t, stderr, "Checksum verified (SHA-512)", + "should confirm checksum passed") + assert.Contains(t, stderr, "Backed up current binary to", + "should report where the old binary was stashed") + + // Verify the stash: the original binary should be saved to + // $IPFS_PATH/old-bin/ipfs-0.30.0 (with .exe on Windows). + stashName := "ipfs-0.30.0" + if runtime.GOOS == "windows" { + stashName += ".exe" + } + stashPath := filepath.Join(node.Dir, "old-bin", stashName) + _, err := os.Stat(stashPath) + require.NoError(t, err, "stash file should exist at %s", stashPath) + + // On Windows the OS locks the executable of a running process, so + // atomicfile cannot rename over it. The install command falls back + // to saving the new binary to a temp path with manual move instructions. + if runtime.GOOS == "windows" && strings.Contains(stderr, "Move it manually") { + assert.Contains(t, stderr, "Could not replace", + "should explain why in-place replacement failed") + assert.Contains(t, stderr, "New binary saved to:", + "should print where the new binary was saved") + + // Extract the temp path from stderr and verify the file exists + // with the expected content. + for line := range strings.SplitSeq(stderr, "\n") { + if savedPath, ok := strings.CutPrefix(line, "New binary saved to: "); ok { + savedPath = strings.TrimSpace(savedPath) + got, err := os.ReadFile(savedPath) + require.NoError(t, err, "new binary should exist at %s", savedPath) + assert.Equal(t, fakeBinary, got, + "binary at %s should contain the extracted archive content", savedPath) + break + } + } + } else { + // Non-Windows (or Windows where in-place replace succeeded): + // binary was replaced atomically. 
+ assert.Contains(t, stderr, "Successfully updated Kubo 0.30.0 -> 0.99.0", + "should confirm the version change") + got, err := os.ReadFile(tmpBinPath) + require.NoError(t, err) + assert.Equal(t, fakeBinary, got, + "binary at %s should contain the extracted archive content", tmpBinPath) + } +} + +// TestUpdateRevert exercises the full revert flow end-to-end: reading +// a stashed binary from $IPFS_PATH/old-bin/, atomically replacing the +// current binary, and cleaning up the stash file. +// +// The stash is created manually (rather than via install) so this test +// is self-contained and does not depend on network access or a mock server. +// +// How it works: the subprocess runs from tmpBinPath, so os.Executable() +// inside the subprocess returns tmpBinPath. The revert command reads the +// stash and atomically replaces the file at tmpBinPath with stash content. +func TestUpdateRevert(t *testing.T) { + // Not t.Parallel(): same ETXTBSY rationale as TestUpdateInstall. + // This test writes a binary copy and exec's it, which must not + // overlap with concurrent fork() calls from other test goroutines. + + binName := "ipfs" + if runtime.GOOS == "windows" { + binName = "ipfs.exe" + } + + // Copy the real built binary to a temp directory. Revert will replace + // this copy with the stash content via os.Executable() -> tmpBinPath. + tmpBinDir := t.TempDir() + tmpBinPath := filepath.Join(tmpBinDir, binName) + copyBuiltBinary(t, tmpBinPath) + + h := harness.NewT(t, func(h *harness.Harness) { + h.IPFSBin = tmpBinPath + }) + node := h.NewNode() + + // Create a stash directory with known content that differs from the + // current binary. findLatestStash looks for ipfs- files. 
+ stashDir := filepath.Join(node.Dir, "old-bin") + require.NoError(t, os.MkdirAll(stashDir, 0o755)) + stashName := "ipfs-0.30.0" + if runtime.GOOS == "windows" { + stashName = "ipfs-0.30.0.exe" + } + stashPath := filepath.Join(stashDir, stashName) + stashContent := []byte("#!/bin/sh\necho reverted-to-0.30.0\n") + require.NoError(t, os.WriteFile(stashPath, stashContent, 0o755)) + + // Run: ipfs update revert + // The subprocess executes from tmpBinPath (a real ipfs binary). + // os.Executable() returns tmpBinPath, so revert replaces that file + // with stashContent and removes the stash file. + res := node.RunIPFS("update", "revert") + require.NoError(t, res.Err, "revert failed; stderr:\n%s", res.Stderr.String()) + + stderr := res.Stderr.String() + + // On Windows the OS locks the running binary, so the revert falls + // back to saving to a temp path with manual move instructions. + if runtime.GOOS == "windows" && strings.Contains(stderr, "Move it manually") { + assert.Contains(t, stderr, "Could not replace", + "should explain why in-place replacement failed") + assert.Contains(t, stderr, "Reverted binary saved to:", + "should print where the reverted binary was saved") + + // Verify the saved binary has the stash content. + for line := range strings.SplitSeq(stderr, "\n") { + if savedPath, ok := strings.CutPrefix(line, "Reverted binary saved to: "); ok { + savedPath = strings.TrimSpace(savedPath) + got, err := os.ReadFile(savedPath) + require.NoError(t, err, "reverted binary should exist at %s", savedPath) + assert.Equal(t, stashContent, got, + "binary at %s should contain the stash content", savedPath) + break + } + } + } else { + // Non-Windows: binary was replaced in place. + assert.Contains(t, stderr, "Reverted to Kubo 0.30.0", + "should confirm which version was restored") + + // Verify the stash file was cleaned up after successful revert. 
+ _, err := os.Stat(stashPath) + assert.True(t, os.IsNotExist(err), + "stash file should be removed after revert, but still exists at %s", stashPath) + + // Verify the binary was replaced with the stash content. + got, err := os.ReadFile(tmpBinPath) + require.NoError(t, err) + assert.Equal(t, stashContent, got, + "binary at %s should contain the stash content after revert", tmpBinPath) + } +} + +// TestUpdateClean exercises the cleanup command that drops every backed-up +// Kubo binary from $IPFS_PATH/old-bin/. The test stages a stash directory +// directly so it doesn't need network access or a real install. +func TestUpdateClean(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + node := h.NewNode() + + stashDir := filepath.Join(node.Dir, "old-bin") + require.NoError(t, os.MkdirAll(stashDir, 0o755)) + + binSuffix := "" + if runtime.GOOS == "windows" { + binSuffix = ".exe" + } + stashFiles := []string{ + "ipfs-0.30.0" + binSuffix, + "ipfs-0.31.0" + binSuffix, + "ipfs-0.32.0" + binSuffix, + } + for _, name := range stashFiles { + require.NoError(t, os.WriteFile(filepath.Join(stashDir, name), []byte("fake"), 0o755)) + } + // A file that does not match ipfs- must be left alone so users + // can store unrelated notes or scripts in old-bin/ without losing them. 
+ unrelated := filepath.Join(stashDir, "notes.txt") + require.NoError(t, os.WriteFile(unrelated, []byte("keep me"), 0o644)) + + t.Run("removes all stashed binaries", func(t *testing.T) { + res := node.IPFS("update", "clean") + out := res.Stdout.String() + for _, name := range stashFiles { + assert.Contains(t, out, name, "should report removing %s", name) + _, err := os.Stat(filepath.Join(stashDir, name)) + assert.True(t, os.IsNotExist(err), "%s should be removed from disk", name) + } + _, err := os.Stat(unrelated) + require.NoError(t, err, "unrelated files in old-bin/ must not be touched") + }) + + t.Run("reports nothing on empty stash", func(t *testing.T) { + res := node.IPFS("update", "clean") + assert.Contains(t, res.Stdout.String(), "No stashed binaries to remove") + }) + + t.Run("json output lists removed files and bytes freed", func(t *testing.T) { + // Re-create one stash file to verify the JSON encoder. + name := "ipfs-0.33.0" + binSuffix + require.NoError(t, os.WriteFile(filepath.Join(stashDir, name), []byte("data"), 0o755)) + + res := node.IPFS("update", "clean", "--enc=json") + var result struct { + Removed []string + BytesFreed int64 + } + err := json.Unmarshal(res.Stdout.Bytes(), &result) + require.NoError(t, err, "invalid JSON: %s", res.Stdout.String()) + assert.Equal(t, []string{name}, result.Removed) + assert.Equal(t, int64(4), result.BytesFreed) + }) +} + +// --- test helpers --- + +// copyBuiltBinary copies the built ipfs binary (cmd/ipfs/ipfs) to dst. +// It locates the project root the same way the test harness does. +func copyBuiltBinary(t *testing.T, dst string) { + t.Helper() + // Use a throwaway harness to resolve the default binary path, + // reusing the same project-root lookup the harness already has. + h := harness.NewT(t) + srcBin := h.IPFSBin + // The harness hardcodes "ipfs" without .exe suffix, but on Windows + // the built binary is "ipfs.exe". 
+ if runtime.GOOS == "windows" && !strings.HasSuffix(srcBin, ".exe") { + srcBin += ".exe" + } + data, err := os.ReadFile(srcBin) + require.NoError(t, err, "failed to read built binary at %s (did you run 'make build'?)", srcBin) + require.NoError(t, os.MkdirAll(filepath.Dir(dst), 0o755)) + require.NoError(t, os.WriteFile(dst, data, 0o755)) +} + +// buildTestTarGz creates an in-memory tar.gz archive with a single file entry. +func buildTestTarGz(t *testing.T, path string, content []byte) []byte { + t.Helper() + var buf bytes.Buffer + gzw := gzip.NewWriter(&buf) + tw := tar.NewWriter(gzw) + require.NoError(t, tw.WriteHeader(&tar.Header{ + Name: path, + Mode: 0o755, + Size: int64(len(content)), + })) + _, err := tw.Write(content) + require.NoError(t, err) + require.NoError(t, tw.Close()) + require.NoError(t, gzw.Close()) + return buf.Bytes() +} + +// buildTestZip creates an in-memory zip archive with a single file entry. +func buildTestZip(t *testing.T, path string, content []byte) []byte { + t.Helper() + var buf bytes.Buffer + zw := zip.NewWriter(&buf) + fw, err := zw.Create(path) + require.NoError(t, err) + _, err = fw.Write(content) + require.NoError(t, err) + require.NoError(t, zw.Close()) + return buf.Bytes() +} diff --git a/test/dependencies/dependencies.go b/test/dependencies/dependencies.go index 848ffba2fab..59765cc0e59 100644 --- a/test/dependencies/dependencies.go +++ b/test/dependencies/dependencies.go @@ -1,3 +1,4 @@ +// Tracks test tool dependencies in go.mod without importing them in production. 
//go:build tools package tools diff --git a/test/dependencies/go.mod b/test/dependencies/go.mod index bb890a1c195..58e8053d1c3 100644 --- a/test/dependencies/go.mod +++ b/test/dependencies/go.mod @@ -1,6 +1,6 @@ module github.com/ipfs/kubo/test/dependencies -go 1.25.0 +go 1.26.2 replace github.com/ipfs/kubo => ../../ @@ -9,7 +9,7 @@ require ( github.com/golangci/golangci-lint v1.64.8 github.com/ipfs/go-cidutil v0.1.1 github.com/ipfs/go-log/v2 v2.9.1 - github.com/ipfs/go-test v0.2.3 + github.com/ipfs/go-test v0.3.0 github.com/ipfs/hang-fds v0.1.0 github.com/ipfs/iptb v1.4.1 github.com/ipfs/iptb-plugins v0.5.1 @@ -75,7 +75,7 @@ require ( github.com/daixiang0/gci v0.13.5 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.1 // indirect github.com/denis-tingaikin/go-header v0.5.0 // indirect github.com/dnephin/pflag v1.0.7 // indirect github.com/dustin/go-humanize v1.0.1 // indirect @@ -127,25 +127,25 @@ require ( github.com/gostaticanalysis/forcetypeassert v0.2.0 // indirect github.com/gostaticanalysis/nilerr v0.1.1 // indirect github.com/hashicorp/go-immutable-radix/v2 v2.1.0 // indirect - github.com/hashicorp/go-version v1.8.0 // indirect + github.com/hashicorp/go-version v1.9.0 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/ipfs/bbloom v0.0.4 // indirect - github.com/ipfs/boxo v0.37.0 // indirect + github.com/ipfs/bbloom v0.1.0 // indirect + github.com/ipfs/boxo v0.38.0 // indirect github.com/ipfs/go-bitfield v1.1.0 // indirect github.com/ipfs/go-block-format v0.2.3 // 
indirect - github.com/ipfs/go-cid v0.6.0 // indirect + github.com/ipfs/go-cid v0.6.1 // indirect github.com/ipfs/go-datastore v0.9.1 // indirect github.com/ipfs/go-dsqueue v0.2.0 // indirect github.com/ipfs/go-ipfs-cmds v0.16.0 // indirect github.com/ipfs/go-ipfs-redirects-file v0.1.2 // indirect github.com/ipfs/go-ipld-cbor v0.2.1 // indirect github.com/ipfs/go-ipld-format v0.6.3 // indirect - github.com/ipfs/go-ipld-legacy v0.2.2 // indirect + github.com/ipfs/go-ipld-legacy v0.3.0 // indirect github.com/ipfs/go-metrics-interface v0.3.0 // indirect github.com/ipfs/go-unixfsnode v1.10.3 // indirect github.com/ipfs/kubo v0.31.0 // indirect @@ -181,9 +181,9 @@ require ( github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-doh-resolver v0.5.0 // indirect github.com/libp2p/go-flow-metrics v0.3.0 // indirect - github.com/libp2p/go-libp2p v0.47.0 // indirect + github.com/libp2p/go-libp2p v0.48.0 // indirect github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect - github.com/libp2p/go-libp2p-kad-dht v0.38.0 // indirect + github.com/libp2p/go-libp2p-kad-dht v0.39.1-0.20260326020727-bcbc21e9f633 // indirect github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect github.com/libp2p/go-libp2p-record v0.3.1 // indirect github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect @@ -207,12 +207,12 @@ require ( github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moricho/tparallel v0.3.2 // indirect - github.com/mr-tron/base58 v1.2.0 // indirect + github.com/mr-tron/base58 v1.3.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-dns v0.5.0 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect - github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multibase v0.3.0 // indirect github.com/multiformats/go-multicodec v0.10.0 // indirect 
github.com/multiformats/go-multistream v0.6.1 // indirect github.com/multiformats/go-varint v0.1.0 // indirect @@ -226,21 +226,18 @@ require ( github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect github.com/pion/datachannel v1.5.10 // indirect - github.com/pion/dtls/v2 v2.2.12 // indirect - github.com/pion/dtls/v3 v3.1.1 // indirect + github.com/pion/dtls/v3 v3.1.2 // indirect github.com/pion/ice/v4 v4.0.10 // indirect github.com/pion/interceptor v0.1.40 // indirect github.com/pion/logging v0.2.4 // indirect github.com/pion/mdns/v2 v2.0.7 // indirect github.com/pion/randutil v0.1.0 // indirect - github.com/pion/rtcp v1.2.15 // indirect + github.com/pion/rtcp v1.2.16 // indirect github.com/pion/rtp v1.8.19 // indirect github.com/pion/sctp v1.8.39 // indirect - github.com/pion/sdp/v3 v3.0.13 // indirect + github.com/pion/sdp/v3 v3.0.18 // indirect github.com/pion/srtp/v3 v3.0.6 // indirect - github.com/pion/stun v0.6.1 // indirect - github.com/pion/stun/v3 v3.0.0 // indirect - github.com/pion/transport/v2 v2.2.10 // indirect + github.com/pion/stun/v3 v3.1.1 // indirect github.com/pion/transport/v3 v3.0.7 // indirect github.com/pion/transport/v4 v4.0.1 // indirect github.com/pion/turn/v4 v4.0.2 // indirect @@ -251,8 +248,8 @@ require ( github.com/polyfloyd/go-errorlint v1.7.1 // indirect github.com/prometheus/client_golang v1.23.2 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.66.1 // indirect - github.com/prometheus/procfs v0.17.0 // indirect + github.com/prometheus/common v0.67.5 // indirect + github.com/prometheus/procfs v0.20.1 // indirect github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 // indirect github.com/quasilyte/go-ruleguard/dsl v0.3.22 // indirect github.com/quasilyte/gogrep v0.5.0 // indirect @@ -316,28 +313,28 @@ require ( go-simpler.org/musttag v0.13.0 // indirect go-simpler.org/sloglint v0.9.0 // indirect 
go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect - go.opentelemetry.io/otel v1.40.0 // indirect - go.opentelemetry.io/otel/metric v1.40.0 // indirect - go.opentelemetry.io/otel/trace v1.40.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 // indirect + go.opentelemetry.io/otel v1.42.0 // indirect + go.opentelemetry.io/otel/metric v1.42.0 // indirect + go.opentelemetry.io/otel/trace v1.42.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/dig v1.19.0 // indirect go.uber.org/fx v1.24.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.1 // indirect go.uber.org/zap/exp v0.3.0 // indirect - go.yaml.in/yaml/v2 v2.4.3 // indirect - golang.org/x/crypto v0.48.0 // indirect - golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a // indirect + go.yaml.in/yaml/v2 v2.4.4 // indirect + golang.org/x/crypto v0.50.0 // indirect + golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 // indirect golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac // indirect - golang.org/x/mod v0.33.0 // indirect - golang.org/x/net v0.50.0 // indirect - golang.org/x/sync v0.19.0 // indirect - golang.org/x/sys v0.41.0 // indirect - golang.org/x/term v0.40.0 // indirect - golang.org/x/text v0.34.0 // indirect + golang.org/x/mod v0.34.0 // indirect + golang.org/x/net v0.52.0 // indirect + golang.org/x/sync v0.20.0 // indirect + golang.org/x/sys v0.43.0 // indirect + golang.org/x/term v0.42.0 // indirect + golang.org/x/text v0.36.0 // indirect golang.org/x/time v0.12.0 // indirect - golang.org/x/tools v0.42.0 // indirect + golang.org/x/tools v0.43.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect gonum.org/v1/gonum v0.17.0 // indirect google.golang.org/protobuf v1.36.11 // indirect diff --git a/test/dependencies/go.sum b/test/dependencies/go.sum index 2c131ae4576..f710ab63f9b 100644 --- 
a/test/dependencies/go.sum +++ b/test/dependencies/go.sum @@ -41,6 +41,10 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/bigmod v0.1.1-0.20260103110540-f8a47775ebe5 h1:JA0fFr+kxpqTdxR9LOBiTWpGNchqmkcsgmdeJZRclZ0= +filippo.io/bigmod v0.1.1-0.20260103110540-f8a47775ebe5/go.mod h1:OjOXDNlClLblvXdwgFFOQFJEocLhhtai8vGLy0JCZlI= +filippo.io/keygen v0.0.0-20260114151900-8e2790ea4c5b h1:REI1FbdW71yO56Are4XAxD+OS/e+BQsB3gE4mZRQEXY= +filippo.io/keygen v0.0.0-20260114151900-8e2790ea4c5b/go.mod h1:9nnw1SlYHYuPSo/3wjQzNjSbeHlq2NsKo5iEtfJPWP0= github.com/4meepo/tagalign v1.4.2 h1:0hcLHPGMjDyM1gHG58cS73aQF8J4TdVR96TZViorO9E= github.com/4meepo/tagalign v1.4.2/go.mod h1:+p4aMyFM+ra7nb41CnFG6aSDXqRxU/w1VQqScKqDARI= github.com/Abirdcfly/dupword v0.1.3 h1:9Pa1NuAsZvpFPi9Pqkd93I7LIYRURj+A//dFd5tgBeE= @@ -190,8 +194,8 @@ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.1 h1:5RVFMOWjMyRy8cARdy79nAmgYw3hK/4HUq48LQ6Wwqo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.1/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= github.com/denis-tingaikin/go-header 
v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42t4429eC9k8= github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY= github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= @@ -422,8 +426,8 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4= -github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.9.0 h1:CeOIz6k+LoN3qX9Z0tyQrPtiB1DFYRPfCIBtaXPSCnA= +github.com/hashicorp/go-version v1.9.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -446,16 +450,16 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1: github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= -github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= -github.com/ipfs/boxo v0.37.0 h1:2E3mZvydMI2t5IkAgtkmZ3sGsld0oS7o3I+xyzDk6uI= -github.com/ipfs/boxo v0.37.0/go.mod h1:8yyiRn54F2CsW13n0zwXEPrVsZix/gFj9SYIRYMZ6KE= +github.com/ipfs/bbloom 
v0.1.0 h1:nIWwfIE3AaG7RCDQIsrUonGCOTp7qSXzxH7ab/ss964= +github.com/ipfs/bbloom v0.1.0/go.mod h1:lDy3A3i6ndgEW2z1CaRFvDi5/ZTzgM1IxA/pkL7Wgts= +github.com/ipfs/boxo v0.38.0 h1:Kt/swuNXAtVXs7EP6KEjB5+2lo5/tTrvWzjakQ8IiOo= +github.com/ipfs/boxo v0.38.0/go.mod h1:A6DRpImSXihx6MiEHOeBjXqleDqK5JX3yWDxM0WygPo= github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU= github.com/ipfs/go-block-format v0.2.3 h1:mpCuDaNXJ4wrBJLrtEaGFGXkferrw5eqVvzaHhtFKQk= github.com/ipfs/go-block-format v0.2.3/go.mod h1:WJaQmPAKhD3LspLixqlqNFxiZ3BZ3xgqxxoSR/76pnA= -github.com/ipfs/go-cid v0.6.0 h1:DlOReBV1xhHBhhfy/gBNNTSyfOM6rLiIx9J7A4DGf30= -github.com/ipfs/go-cid v0.6.0/go.mod h1:NC4kS1LZjzfhK40UGmpXv5/qD2kcMzACYJNntCUiDhQ= +github.com/ipfs/go-cid v0.6.1 h1:T5TnNb08+ueovG76Z5gx1L4Y7QOaGTXHg1F6raWFxIc= +github.com/ipfs/go-cid v0.6.1/go.mod h1:zrY0SwOhjrrIdfPQ/kf+k1sXyJ0QE7cMxfCployLBs0= github.com/ipfs/go-cidutil v0.1.1 h1:COuby6H8C2ml0alvHYX3WdbFM4F07YtbY0UlT5j+sgI= github.com/ipfs/go-cidutil v0.1.1/go.mod h1:SCoUftGEUgoXe5Hjeyw5CiLZF8cwYn/TbtpFQXJCP6k= github.com/ipfs/go-datastore v0.9.1 h1:67Po2epre/o0UxrmkzdS9ZTe2GFGODgTd2odx8Wh6Yo= @@ -478,16 +482,16 @@ github.com/ipfs/go-ipld-cbor v0.2.1 h1:H05yEJbK/hxg0uf2AJhyerBDbjOuHX4yi+1U/ogRa github.com/ipfs/go-ipld-cbor v0.2.1/go.mod h1:x9Zbeq8CoE5R2WicYgBMcr/9mnkQ0lHddYWJP2sMV3A= github.com/ipfs/go-ipld-format v0.6.3 h1:9/lurLDTotJpZSuL++gh3sTdmcFhVkCwsgx2+rAh4j8= github.com/ipfs/go-ipld-format v0.6.3/go.mod h1:74ilVN12NXVMIV+SrBAyC05UJRk0jVvGqdmrcYZvCBk= -github.com/ipfs/go-ipld-legacy v0.2.2 h1:DThbqCPVLpWBcGtU23KDLiY2YRZZnTkXQyfz8aOfBkQ= -github.com/ipfs/go-ipld-legacy v0.2.2/go.mod h1:hhkj+b3kG9b2BcUNw8IFYAsfeNo8E3U7eYlWeAOPyDU= +github.com/ipfs/go-ipld-legacy v0.3.0 h1:7XhFKkRyCvP5upOlQfKUFIqL3S5DEZnbUE4bQmQ/tNE= +github.com/ipfs/go-ipld-legacy v0.3.0/go.mod h1:Ukef9ARQiX+RVetwH2XiReLgJvQDEXcUPszrZ1KRjKI= 
github.com/ipfs/go-log/v2 v2.9.1 h1:3JXwHWU31dsCpvQ+7asz6/QsFJHqFr4gLgQ0FWteujk= github.com/ipfs/go-log/v2 v2.9.1/go.mod h1:evFx7sBiohUN3AG12mXlZBw5hacBQld3ZPHrowlJYoo= github.com/ipfs/go-metrics-interface v0.3.0 h1:YwG7/Cy4R94mYDUuwsBfeziJCVm9pBMJ6q/JR9V40TU= github.com/ipfs/go-metrics-interface v0.3.0/go.mod h1:OxxQjZDGocXVdyTPocns6cOLwHieqej/jos7H4POwoY= github.com/ipfs/go-peertaskqueue v0.8.3 h1:tBPpGJy+A92RqtRFq5amJn0Uuj8Pw8tXi0X3eHfHM8w= github.com/ipfs/go-peertaskqueue v0.8.3/go.mod h1:OqVync4kPOcXEGdj/LKvox9DCB5mkSBeXsPczCxLtYA= -github.com/ipfs/go-test v0.2.3 h1:Z/jXNAReQFtCYyn7bsv/ZqUwS6E7iIcSpJ2CuzCvnrc= -github.com/ipfs/go-test v0.2.3/go.mod h1:QW8vSKkwYvWFwIZQLGQXdkt9Ud76eQXRQ9Ao2H+cA1o= +github.com/ipfs/go-test v0.3.0 h1:0Y4Uve3tp9HI+2lIJjfOliOrOgv/YpXg/l1y3P4DEYE= +github.com/ipfs/go-test v0.3.0/go.mod h1:JK+U8pRpATZb7lsYNSJlCj3WYB3cFfWIbI6nWRM/GFk= github.com/ipfs/go-unixfsnode v1.10.3 h1:c8sJjuGNkxXAQH75P+f5ngPda/9T+DrboVA0TcDGvGI= github.com/ipfs/go-unixfsnode v1.10.3/go.mod h1:2Jlc7DoEwr12W+7l8Hr6C7XF4NHST3gIkqSArLhGSxU= github.com/ipfs/hang-fds v0.1.0 h1:deBiFlWHsVGzJ0ZMaqscEqRM1r2O1rFZ59UiQXb1Xko= @@ -574,12 +578,12 @@ github.com/libp2p/go-doh-resolver v0.5.0 h1:4h7plVVW+XTS+oUBw2+8KfoM1jF6w8XmO7+s github.com/libp2p/go-doh-resolver v0.5.0/go.mod h1:aPDxfiD2hNURgd13+hfo29z9IC22fv30ee5iM31RzxU= github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784= github.com/libp2p/go-flow-metrics v0.3.0/go.mod h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo= -github.com/libp2p/go-libp2p v0.47.0 h1:qQpBjSCWNQFF0hjBbKirMXE9RHLtSuzTDkTfr1rw0yc= -github.com/libp2p/go-libp2p v0.47.0/go.mod h1:s8HPh7mMV933OtXzONaGFseCg/BE//m1V34p3x4EUOY= +github.com/libp2p/go-libp2p v0.48.0 h1:h2BrLAgrj7X8bEN05K7qmrjpNHYA+6tnsGRdprjTnvo= +github.com/libp2p/go-libp2p v0.48.0/go.mod h1:Q1fBZNdmC2Hf82husCTfkKJVfHm2we5zk+NWmOGEmWk= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= 
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= -github.com/libp2p/go-libp2p-kad-dht v0.38.0 h1:NToFzwvICo6ghDfSwuTmROCtl9LDXSZT1VawEbm4NUs= -github.com/libp2p/go-libp2p-kad-dht v0.38.0/go.mod h1:g/CefQilAnCMyUH52A6tUGbe17NgQ8q26MaZCA968iI= +github.com/libp2p/go-libp2p-kad-dht v0.39.1-0.20260326020727-bcbc21e9f633 h1:PcubpdBr1BBg39st+CqGp3EOX++DOBK6B/s07P31eMg= +github.com/libp2p/go-libp2p-kad-dht v0.39.1-0.20260326020727-bcbc21e9f633/go.mod h1:Po2JugFEkDq9Vig/JXtc153ntOi0q58o4j7IuITCOVs= github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s= github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4= github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= @@ -659,8 +663,8 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/moricho/tparallel v0.3.2 h1:odr8aZVFA3NZrNybggMkYO3rgPRcqjeQUlBBFVxKHTI= github.com/moricho/tparallel v0.3.2/go.mod h1:OQ+K3b4Ln3l2TZveGCywybl68glfLEwFGqvnjok8b+U= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= -github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.3.0 h1:K6Y13R2h+dku0wOqKtecgRnBUBPrZzLZy5aIj8lCcJI= +github.com/mr-tron/base58 v1.3.0/go.mod h1:2BuubE67DCSWwVfx37JWNG8emOC0sHEU4/HpcYgCLX8= github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= @@ -672,8 +676,8 @@ github.com/multiformats/go-multiaddr-dns v0.5.0 h1:p/FTyHKX0nl59f+S+dEUe8HRK+i5O github.com/multiformats/go-multiaddr-dns v0.5.0/go.mod 
h1:yJ349b8TPIAANUyuOzn1oz9o22tV9f+06L+cCeMxC14= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= -github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= -github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= +github.com/multiformats/go-multibase v0.3.0 h1:8helZD2+4Db7NNWFiktk2NePbF0boolBe6bDQvM4r68= +github.com/multiformats/go-multibase v0.3.0/go.mod h1:MoBLQPCkRTOL3eveIPO81860j2AQY8JwcnNlRkGRUfI= github.com/multiformats/go-multicodec v0.10.0 h1:UpP223cig/Cx8J76jWt91njpK3GTAO1w02sdcjZDSuc= github.com/multiformats/go-multicodec v0.10.0/go.mod h1:wg88pM+s2kZJEQfRCKBNU+g32F5aWBEjyFHXvZLTcLI= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= @@ -721,40 +725,30 @@ github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4 github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o= github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M= -github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= -github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= -github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/dtls/v3 v3.1.1 h1:wSLMam9Kf7DL1A74hnqRvEb9OT+aXPAsQ5VS+BdXOJ0= -github.com/pion/dtls/v3 v3.1.1/go.mod h1:7FGvVYpHsUV6+aywaFpG7aE4Vz8nBOx74odPRFue6cI= +github.com/pion/dtls/v3 v3.1.2 h1:gqEdOUXLtCGW+afsBLO0LtDD8GnuBBjEy6HRtyofZTc= +github.com/pion/dtls/v3 v3.1.2/go.mod h1:Hw/igcX4pdY69z1Hgv5x7wJFrUkdgHwAn/Q/uo7YHRo= github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4= github.com/pion/ice/v4 
v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw= github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4= github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic= -github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= github.com/pion/logging v0.2.4 h1:tTew+7cmQ+Mc1pTBLKH2puKsOvhm32dROumOZ655zB8= github.com/pion/logging v0.2.4/go.mod h1:DffhXTKYdNZU+KtJ5pyQDjvOAh/GsNSyv1lbkFbe3so= github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM= github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA= github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= -github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo= -github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0= +github.com/pion/rtcp v1.2.16 h1:fk1B1dNW4hsI78XUCljZJlC4kZOPk67mNRuQ0fcEkSo= +github.com/pion/rtcp v1.2.16/go.mod h1:/as7VKfYbs5NIb4h6muQ35kQF/J0ZVNz2Z3xKoCBYOo= github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c= github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk= github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE= github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE= -github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4= -github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E= +github.com/pion/sdp/v3 v3.0.18 h1:l0bAXazKHpepazVdp+tPYnrsy9dfh7ZbT8DxesH5ZnI= +github.com/pion/sdp/v3 v3.0.18/go.mod h1:ZREGo6A9ZygQ9XkqAj5xYCQtQpif0i6Pa81HOiAdqQ8= github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4= github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY= -github.com/pion/stun v0.6.1 
h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= -github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= -github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw= -github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU= -github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= -github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= -github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= -github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= +github.com/pion/stun/v3 v3.1.1 h1:CkQxveJ4xGQjulGSROXbXq94TAWu8gIX2dT+ePhUkqw= +github.com/pion/stun/v3 v3.1.1/go.mod h1:qC1DfmcCTQjl9PBaMa5wSn3x9IPmKxSdcCsxBcDBndM= github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= github.com/pion/transport/v4 v4.0.1 h1:sdROELU6BZ63Ab7FrOLn13M6YdJLY20wldXW2Cu2k8o= @@ -783,10 +777,10 @@ github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UH github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= -github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= -github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= -github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4= +github.com/prometheus/common v0.67.5/go.mod 
h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw= +github.com/prometheus/procfs v0.20.1 h1:XwbrGOIplXW/AU3YhIhLODXMJYyC1isLFfYCsTEycfc= +github.com/prometheus/procfs v0.20.1/go.mod h1:o9EMBZGRyvDrSPH1RqdxhojkuXstoe4UlK79eF5TGGo= github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 h1:+Wl/0aFp0hpuHM3H//KMft64WQ1yX9LdJY64Qm/gFCo= github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI= github.com/quasilyte/go-ruleguard/dsl v0.3.22 h1:wd8zkOhSNr+I+8Qeciml08ivDt1pSXe60+5DqOpCjPE= @@ -896,7 +890,6 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= @@ -949,7 +942,6 @@ github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= -github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= github.com/wlynxg/anet v0.0.5/go.mod 
h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU= @@ -993,18 +985,18 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= -go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= -go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= -go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= -go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= -go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= -go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= -go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= -go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= -go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= -go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 h1:OyrsyzuttWTSur2qN/Lm0m2a8yqyIjUVBZcxFPuXq2o= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0/go.mod h1:C2NGBr+kAB4bk3xtMXfZ94gqFDtg/GkI7e9zqGh5Beg= +go.opentelemetry.io/otel v1.42.0 h1:lSQGzTgVR3+sgJDAU/7/ZMjN9Z+vUip7leaqBKy4sho= +go.opentelemetry.io/otel 
v1.42.0/go.mod h1:lJNsdRMxCUIWuMlVJWzecSMuNjE7dOYyWlqOXWkdqCc= +go.opentelemetry.io/otel/metric v1.42.0 h1:2jXG+3oZLNXEPfNmnpxKDeZsFI5o4J+nz6xUlaFdF/4= +go.opentelemetry.io/otel/metric v1.42.0/go.mod h1:RlUN/7vTU7Ao/diDkEpQpnz3/92J9ko05BIwxYa2SSI= +go.opentelemetry.io/otel/sdk v1.42.0 h1:LyC8+jqk6UJwdrI/8VydAq/hvkFKNHZVIWuslJXYsDo= +go.opentelemetry.io/otel/sdk v1.42.0/go.mod h1:rGHCAxd9DAph0joO4W6OPwxjNTYWghRWmkHuGbayMts= +go.opentelemetry.io/otel/sdk/metric v1.42.0 h1:D/1QR46Clz6ajyZ3G8SgNlTJKBdGp84q9RKCAZ3YGuA= +go.opentelemetry.io/otel/sdk/metric v1.42.0/go.mod h1:Ua6AAlDKdZ7tdvaQKfSmnFTdHx37+J4ba8MwVCYM5hc= +go.opentelemetry.io/otel/trace v1.42.0 h1:OUCgIPt+mzOnaUTpOQcBiM/PLQ/Op7oq6g4LenLmOYY= +go.opentelemetry.io/otel/trace v1.42.0/go.mod h1:f3K9S+IFqnumBkKhRJMeaZeNk9epyhnCmQh/EysQCdc= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= @@ -1024,8 +1016,8 @@ go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U= go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ= -go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= -go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v2 v2.4.4 h1:tuyd0P+2Ont/d6e2rl3be67goVK4R6deVxCUX5vyPaQ= +go.yaml.in/yaml/v2 v2.4.4/go.mod h1:gMZqIpDtDqOfM0uNfy0SkpRhvUryYH0Z6wdMYcacYXQ= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -1038,13 +1030,10 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= -golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= +golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI= +golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1055,8 +1044,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp 
v0.0.0-20260212183809-81e46e3db34a h1:ovFr6Z0MNmU7nH8VaX5xqw+05ST2uO1exVfZPVqRC5o= -golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA= +golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 h1:jiDhWWeC7jfWqR9c/uplMOqJ0sbNlNWv0UkzE0vX1MA= +golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90/go.mod h1:xE1HEv6b+1SCZ5/uscMRjUBKtIxworgEcEi+/n9NQDQ= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac h1:TSSpLIG4v+p0rPv1pNOQtl1I8knsO4S9trOxNMOLVP4= @@ -1093,8 +1082,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= +golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1136,14 +1125,11 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net 
v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60= -golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM= +golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= +golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1171,8 +1157,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= -golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1228,29 +1214,23 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= -golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/telemetry v0.0.0-20260209163413-e7419c687ee4 h1:bTLqdHv7xrGlFbvf5/TXNxy/iUwwdkjhqQTJDjW7aj0= -golang.org/x/telemetry v0.0.0-20260209163413-e7419c687ee4/go.mod h1:g5NllXBEermZrmR51cJDQxmJUHUOfRAaNyWBM+R+548= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/telemetry v0.0.0-20260311193753-579e4da9a98c h1:6a8FdnNk6bTXBjR4AGKFgUKuo+7GnR3FX5L7CbveeZc= +golang.org/x/telemetry v0.0.0-20260311193753-579e4da9a98c/go.mod h1:TpUTTEp9frx7rTdLpC9gFG9kdI7zVLFTFFlqaH2Cncw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg= -golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= +golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY= +golang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1264,11 +1244,9 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.34.0 
h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= -golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= +golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg= +golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1341,8 +1319,8 @@ golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= -golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k= -golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0= +golang.org/x/tools v0.43.0 h1:12BdW9CeB3Z+J/I/wj34VMl8X+fEXBxVR90JeMX5E7s= +golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0= golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= diff --git a/test/integration/three_legged_cat_test.go b/test/integration/three_legged_cat_test.go index fa594f1e5c2..4056d7826da 100644 --- a/test/integration/three_legged_cat_test.go +++ b/test/integration/three_legged_cat_test.go @@ -26,7 +26,7 @@ func TestThreeLeggedCatTransfer(t *testing.T) { RoutingLatency: 0, BlockstoreLatency: 0, } - if err := RunThreeLeggedCat(RandomBytes(100*unit.MB), conf); err != nil 
{ + if err := RunThreeLeggedCat(RandomBytes(1*unit.MB), conf); err != nil { t.Fatal(err) } } @@ -64,7 +64,7 @@ func TestThreeLeggedCat100MBMacbookCoastToCoast(t *testing.T) { } func RunThreeLeggedCat(data []byte, conf testutil.LatencyConfig) error { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) defer cancel() // create network @@ -122,6 +122,13 @@ func RunThreeLeggedCat(data []byte, conf testutil.LatencyConfig) error { return err } + // Explicitly provide the root CID to the DHT so the catter can discover + // the adder. Without this, the async reprovider may not have propagated + // the record before the catter queries. + if err := adder.Routing.Provide(ctx, added.RootCid(), true); err != nil { + return err + } + readerCatted, err := catterAPI.Unixfs().Get(ctx, added) if err != nil { return err diff --git a/test/sharness/lib/test-lib.sh b/test/sharness/lib/test-lib.sh index 413d0e92f78..02a54fb4b44 100644 --- a/test/sharness/lib/test-lib.sh +++ b/test/sharness/lib/test-lib.sh @@ -364,7 +364,7 @@ test_mount_ipfs() { test_expect_success FUSE "'ipfs mount' output looks good" ' echo "IPFS mounted at: $(pwd)/ipfs" >expected && echo "IPNS mounted at: $(pwd)/ipns" >>expected && - echo "MFS mounted at: $(pwd)/mfs" >>expected && + echo "MFS mounted at: $(pwd)/mfs" >>expected && test_cmp expected actual ' diff --git a/test/sharness/t0030-mount.sh b/test/sharness/t0030-mount.sh deleted file mode 100755 index 6df7a26bbef..00000000000 --- a/test/sharness/t0030-mount.sh +++ /dev/null @@ -1,133 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2014 Christian Couder -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test mount command" - -. lib/test-lib.sh - -# if in travis CI, don't test mount (no fuse) -if ! 
test_have_prereq FUSE; then - skip_all='skipping mount tests, fuse not available' - - test_done -fi - - -# echo -n "ipfs" > expected && ipfs add --cid-version 1 -Q -w expected -export IPFS_NS_MAP="welcome.example.com:/ipfs/bafybeicq7bvn5lz42qlmghaoiwrve74pzi53auqetbantp5kajucsabike" - -# start iptb + wait for peering -NUM_NODES=5 -test_expect_success 'init iptb' ' - iptb testbed create -type localipfs -count $NUM_NODES -init -' -startup_cluster $NUM_NODES - -# test mount failure before mounting properly. -test_expect_success "'ipfs mount' fails when there is no mount dir" ' - tmp_ipfs_mount() { ipfsi 0 mount -f=not_ipfs -n=not_ipns -m=not_mfs >output 2>output.err; } && - test_must_fail tmp_ipfs_mount -' - -test_expect_success "'ipfs mount' output looks good" ' - test_must_be_empty output && - test_should_contain "not_ipns\|not_ipfs\|not_mfs" output.err -' - -test_expect_success "setup and publish default IPNS value" ' - mkdir "$(pwd)/ipfs" "$(pwd)/ipns" "$(pwd)/mfs" && - ipfsi 0 name publish QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn -' - -# make sure stuff is unmounted first -# then mount properly -test_expect_success FUSE "'ipfs mount' succeeds" ' - do_umount "$(pwd)/ipfs" || true && - do_umount "$(pwd)/ipns" || true && - do_umount "$(pwd)/mfs" || true && - ipfsi 0 mount -f "$(pwd)/ipfs" -n "$(pwd)/ipns" -m "$(pwd)/mfs" >actual -' - -test_expect_success FUSE "'ipfs mount' output looks good" ' - echo "IPFS mounted at: $(pwd)/ipfs" >expected && - echo "IPNS mounted at: $(pwd)/ipns" >>expected && - echo "MFS mounted at: $(pwd)/mfs" >>expected && - test_cmp expected actual -' - -test_expect_success FUSE "local symlink works" ' - ipfsi 0 id -f"\n" > expected && - basename $(readlink ipns/local) > actual && - test_cmp expected actual -' - -test_expect_success FUSE "can resolve ipns names" ' - echo -n "ipfs" > expected && - ipfsi 0 add --cid-version 1 -Q -w expected && - cat ipns/welcome.example.com/expected > actual && - test_cmp expected actual -' - 
-test_expect_success FUSE "create mfs file via fuse" ' - touch mfs/testfile && - ipfsi 0 files ls | grep testfile -' - -test_expect_success FUSE "create mfs dir via fuse" ' - mkdir mfs/testdir && - ipfsi 0 files ls | grep testdir -' - -test_expect_success FUSE "read mfs file from fuse" ' - echo content > mfs/testfile && - getfattr -n ipfs_cid mfs/testfile -' -test_expect_success FUSE "ipfs add file and read it back via fuse" ' - echo content3 | ipfsi 0 files write -e /testfile3 && - grep content3 mfs/testfile3 -' - -test_expect_success FUSE "ipfs add file and read it back via fuse" ' - echo content > testfile2 && - ipfsi 0 add --to-files /testfile2 testfile2 && - grep content mfs/testfile2 -' - -test_expect_success FUSE "test file xattr" ' - echo content > mfs/testfile && - getfattr -n ipfs_cid mfs/testfile -' - -test_expect_success FUSE "test file removal" ' - touch mfs/testfile && - rm mfs/testfile -' - -test_expect_success FUSE "test nested dirs" ' - mkdir -p mfs/foo/bar/baz/qux && - echo content > mfs/foo/bar/baz/qux/quux && - ipfsi 0 files stat /foo/bar/baz/qux/quux -' - -test_expect_success "mount directories cannot be removed while active" ' - test_must_fail rmdir ipfs ipns mfs 2>/dev/null -' - -test_expect_success "unmount directories" ' - do_umount "$(pwd)/ipfs" && - do_umount "$(pwd)/ipns" && - do_umount "$(pwd)/mfs" -' - -test_expect_success "mount directories can be removed after shutdown" ' - rmdir ipfs ipns mfs -' - -test_expect_success 'stop iptb' ' - iptb stop -' - -test_done diff --git a/test/sharness/t0031-mount-publish.sh b/test/sharness/t0031-mount-publish.sh deleted file mode 100755 index 95b52bfe5e7..00000000000 --- a/test/sharness/t0031-mount-publish.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test mount command in conjunction with publishing" - -# imports -. lib/test-lib.sh - -# if in travis CI, don't test mount (no fuse) -if ! 
test_have_prereq FUSE; then - skip_all='skipping mount tests, fuse not available' - - test_done -fi - -test_init_ipfs - -# start iptb + wait for peering -NUM_NODES=3 -test_expect_success 'init iptb' ' - iptb testbed create -type localipfs -count $NUM_NODES -force -init && - startup_cluster $NUM_NODES -' - -# pre-mount publish -HASH=$(echo 'hello warld' | ipfsi 0 add -Q -w --stdin-name "file") -test_expect_success "can publish before mounting /ipns" ' - ipfsi 0 name publish "$HASH" -' - -# mount -IPFS_MOUNT_DIR="$PWD/ipfs" -IPNS_MOUNT_DIR="$PWD/ipns" -test_expect_success FUSE "'ipfs mount' succeeds" ' - ipfsi 0 mount -f "'"$IPFS_MOUNT_DIR"'" -n "'"$IPNS_MOUNT_DIR"'" >actual -' -test_expect_success FUSE "'ipfs mount' output looks good" ' - echo "IPFS mounted at: $PWD/ipfs" >expected && - echo "IPNS mounted at: $PWD/ipns" >>expected && - test_cmp expected actual -' - -test_expect_success "cannot publish after mounting /ipns" ' - echo "Error: cannot manually publish while IPNS is mounted" >expected && - test_must_fail ipfsi 0 name publish '$HASH' 2>actual && - test_cmp expected actual -' - -test_expect_success "unmount /ipns out-of-band" ' - fusermount -u "'"$IPNS_MOUNT_DIR"'" -' - -test_expect_success "can publish after unmounting /ipns" ' - ipfsi 0 name publish '$HASH' -' - -# clean-up ipfs -test_expect_success "unmount /ipfs" ' - fusermount -u "'"$IPFS_MOUNT_DIR"'" -' -iptb stop - -test_done diff --git a/test/sharness/t0032-mount-sharded.sh b/test/sharness/t0032-mount-sharded.sh deleted file mode 100755 index 7a3e518585b..00000000000 --- a/test/sharness/t0032-mount-sharded.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2021 Protocol Labs -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test mount command with sharding enabled" - -. lib/test-lib.sh - -if ! 
test_have_prereq FUSE; then - skip_all='skipping mount sharded tests, fuse not available' - test_done -fi - -test_init_ipfs - -test_expect_success 'force sharding' ' - ipfs config --json Import.UnixFSHAMTDirectorySizeThreshold "\"1B\"" -' - -test_launch_ipfs_daemon -test_mount_ipfs - -# we're testing nested subdirs which ensures that IPLD ADLs work -test_expect_success 'setup test data' ' - mkdir testdata && - echo a > testdata/a && - mkdir testdata/subdir && - echo b > testdata/subdir/b -' - -HASH=QmY59Ufw8zA2BxGPMTcfXg86JVed81Qbxeq5rDkHWSLN1m - -test_expect_success 'can add the data' ' - echo $HASH > expected_hash && - ipfs add -r -Q testdata > actual_hash && - test_cmp expected_hash actual_hash -' - -test_expect_success 'can read the data' ' - echo a > expected_a && - cat "ipfs/$HASH/a" > actual_a && - test_cmp expected_a actual_a && - echo b > expected_b && - cat "ipfs/$HASH/subdir/b" > actual_b && - test_cmp expected_b actual_b -' - -test_expect_success 'can list directories' ' - printf "a\nsubdir\n" > expected_ls && - ls -1 "ipfs/$HASH" > actual_ls && - test_cmp expected_ls actual_ls && - printf "b\n" > expected_ls_subdir && - ls -1 "ipfs/$HASH/subdir" > actual_ls_subdir && - test_cmp expected_ls_subdir actual_ls_subdir -' - -test_expect_success "unmount" ' - do_umount "$(pwd)/ipfs" && - do_umount "$(pwd)/ipns" -' - -test_expect_success 'cleanup' 'rmdir ipfs ipns' - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0051-object.sh b/test/sharness/t0051-object.sh index 1cc66adae1e..851e1969847 100755 --- a/test/sharness/t0051-object.sh +++ b/test/sharness/t0051-object.sh @@ -27,15 +27,87 @@ test_patch_create_path() { } test_object_cmd() { + # Bare dag-pb node with no UnixFS metadata (0 bytes of protobuf data) EMPTY_DIR=$(echo '{"Links":[]}' | ipfs dag put --store-codec dag-pb) + # Empty UnixFS directory (equivalent to QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn) EMPTY_UNIXFS_DIR=$(echo '{"Data":{"/":{"bytes":"CAE"}},"Links":[]}' | ipfs dag put 
--store-codec dag-pb) + # Empty UnixFS file (QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH) + EMPTY_UNIXFS_FILE=$(echo -n | ipfs add -q) + # Empty HAMTShard (Type=HAMTShard, HashType=0x22, Fanout=256) + EMPTY_HAMT=$(echo '{"Data":{"/":{"bytes":"CAUoIjCAAg"}},"Links":[]}' | ipfs dag put --store-codec dag-pb) + + # --- UnixFS validation for 'object patch add-link' --- + # 'object patch' operates at the dag-pb level via dagutils.Editor, which + # only manipulates ProtoNode links without updating UnixFS metadata. + # Only plain UnixFS Directory nodes are safe to mutate this way. + # https://specs.ipfs.tech/unixfs/#pbnode-links-name + # https://github.com/ipfs/kubo/issues/7190 + # + # Four root node types tested below: + # 1) bare dag-pb (no UnixFS data) -- rejected + # 2) UnixFS File -- rejected (prevents data loss) + # 3) HAMTShard -- rejected (corrupts HAMT bitfield) + # 4) UnixFS Directory -- allowed + + # Reproduce https://github.com/ipfs/kubo/issues/7190: + # adding a named link to a File node must be rejected to prevent data loss. 
+ test_expect_success "'ipfs object patch add-link' prevents data loss on File nodes (#7190)" ' + echo "original content" > original.txt && + ORIGINAL_CID=$(ipfs add -q original.txt) && + CHILD_CID=$(echo "child" | ipfs add -q) && + test_expect_code 1 ipfs object patch $ORIGINAL_CID add-link "child.txt" $CHILD_CID 2>patch_7190_err && + echo "Error: cannot add named links to a UnixFS File node, only Directory nodes support link addition at the dag-pb level (see https://specs.ipfs.tech/unixfs/)" >patch_7190_expected && + test_cmp patch_7190_expected patch_7190_err && + # verify the original file is still intact + ipfs cat $ORIGINAL_CID > original_readback.txt && + test_cmp original.txt original_readback.txt + ' + + # 1) Bare dag-pb (no UnixFS data): rejected by default + test_expect_success "'ipfs object patch add-link' rejects non-UnixFS dag-pb nodes" ' + test_expect_code 1 ipfs object patch $EMPTY_DIR add-link foo $EMPTY_UNIXFS_DIR 2>patch_dagpb_err + ' + + test_expect_success "add-link error for non-UnixFS dag-pb has expected message" ' + echo "Error: cannot add named links to a non-UnixFS dag-pb node; pass --allow-non-unixfs to skip validation" >patch_dagpb_expected && + test_cmp patch_dagpb_expected patch_dagpb_err + ' - test_expect_success "'ipfs object patch' should work (no unixfs-dir)" ' - OUTPUT=$(ipfs object patch $EMPTY_DIR add-link foo $EMPTY_DIR) && + test_expect_success "'ipfs object patch add-link --allow-non-unixfs' works on dag-pb nodes" ' + OUTPUT=$(ipfs object patch $EMPTY_DIR add-link --allow-non-unixfs foo $EMPTY_UNIXFS_DIR) && ipfs dag stat $OUTPUT ' - test_expect_success "'ipfs object patch' should work" ' + # 2) UnixFS File (QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH): rejected by default + test_expect_success "'ipfs object patch add-link' rejects UnixFS File nodes" ' + test_expect_code 1 ipfs object patch $EMPTY_UNIXFS_FILE add-link foo $EMPTY_UNIXFS_DIR 2>patch_file_err + ' + + test_expect_success "add-link error for UnixFS File has 
expected message" ' + echo "Error: cannot add named links to a UnixFS File node, only Directory nodes support link addition at the dag-pb level (see https://specs.ipfs.tech/unixfs/)" >patch_file_expected && + test_cmp patch_file_expected patch_file_err + ' + + test_expect_success "'ipfs object patch add-link --allow-non-unixfs' bypasses check on File nodes" ' + ipfs object patch $EMPTY_UNIXFS_FILE add-link --allow-non-unixfs foo $EMPTY_UNIXFS_DIR + ' + + # 3) HAMTShard: rejected (dag-pb level mutation corrupts HAMT bitfield) + test_expect_success "'ipfs object patch add-link' rejects HAMTShard nodes" ' + test_expect_code 1 ipfs object patch $EMPTY_HAMT add-link foo $EMPTY_UNIXFS_DIR 2>patch_hamt_err + ' + + test_expect_success "add-link error for HAMTShard has expected message" ' + echo "Error: cannot add links to a HAMTShard at the dag-pb level (would corrupt the HAMT bitfield); use '"'"'ipfs files'"'"' commands instead, or pass --allow-non-unixfs to override" >patch_hamt_expected && + test_cmp patch_hamt_expected patch_hamt_err + ' + + test_expect_success "'ipfs object patch add-link --allow-non-unixfs' bypasses check on HAMTShard" ' + ipfs object patch $EMPTY_HAMT add-link --allow-non-unixfs foo $EMPTY_UNIXFS_DIR + ' + + # 4) UnixFS Directory (QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn): allowed + test_expect_success "'ipfs object patch add-link' works on UnixFS Directory nodes" ' OUTPUT=$(ipfs object patch $EMPTY_UNIXFS_DIR add-link foo $EMPTY_UNIXFS_DIR) && ipfs dag stat $OUTPUT ' @@ -122,15 +194,70 @@ test_object_cmd() { test_patch_create_path $EMPTY a/b/b/b/b $FILE - test_expect_success "can create blank object" ' - BLANK=$EMPTY_DIR + test_expect_success "'ipfs object patch add-link --create' rejects non-UnixFS roots" ' + test_must_fail ipfs object patch $EMPTY_DIR add-link --create a $FILE ' - test_patch_create_path $BLANK a $FILE + test_expect_success "'ipfs object patch add-link --create --allow-non-unixfs' works on non-UnixFS roots" ' + PCOUT=$(ipfs 
object patch $EMPTY_DIR add-link --create --allow-non-unixfs a $FILE) && + ipfs cat "$PCOUT/a" >tpcp_out && + ipfs cat "$FILE" >tpcp_exp && + test_cmp tpcp_exp tpcp_out + ' test_expect_success "create bad path fails" ' test_must_fail ipfs object patch $EMPTY add-link --create / $FILE ' + + # --- UnixFS validation for 'object patch rm-link' --- + # Same rationale as add-link: dagutils.Editor cannot update UnixFS metadata. + + # 1) Bare dag-pb: rejected by default + test_expect_success "'ipfs object patch rm-link' rejects non-UnixFS dag-pb nodes" ' + DAGPB_WITH_LINK=$(ipfs object patch $EMPTY_DIR add-link --allow-non-unixfs foo $EMPTY_UNIXFS_DIR) && + test_expect_code 1 ipfs object patch $DAGPB_WITH_LINK rm-link foo 2>rmlink_dagpb_err + ' + + test_expect_success "rm-link error for non-UnixFS dag-pb has expected message" ' + echo "Error: cannot remove links from a non-UnixFS dag-pb node; pass --allow-non-unixfs to skip validation" >rmlink_dagpb_expected && + test_cmp rmlink_dagpb_expected rmlink_dagpb_err + ' + + test_expect_success "'ipfs object patch rm-link --allow-non-unixfs' works on dag-pb nodes" ' + ipfs object patch $DAGPB_WITH_LINK rm-link --allow-non-unixfs foo + ' + + # 2) UnixFS File: rejected by default + test_expect_success "'ipfs object patch rm-link' rejects UnixFS File nodes" ' + FILE_WITH_LINK=$(ipfs object patch $EMPTY_UNIXFS_FILE add-link --allow-non-unixfs foo $EMPTY_UNIXFS_DIR) && + test_expect_code 1 ipfs object patch $FILE_WITH_LINK rm-link foo 2>rmlink_file_err + ' + + test_expect_success "rm-link error for UnixFS File has expected message" ' + echo "Error: cannot remove links from a UnixFS File node, only Directory nodes support link removal at the dag-pb level (see https://specs.ipfs.tech/unixfs/)" >rmlink_file_expected && + test_cmp rmlink_file_expected rmlink_file_err + ' + + test_expect_success "'ipfs object patch rm-link --allow-non-unixfs' bypasses check on File nodes" ' + ipfs object patch $FILE_WITH_LINK rm-link --allow-non-unixfs foo 
+ ' + + # 3) HAMTShard: rejected by default + test_expect_success "'ipfs object patch rm-link' rejects HAMTShard nodes" ' + HAMT_WITH_LINK=$(ipfs object patch $EMPTY_HAMT add-link --allow-non-unixfs foo $EMPTY_UNIXFS_DIR) && + test_expect_code 1 ipfs object patch $HAMT_WITH_LINK rm-link foo 2>rmlink_hamt_err + ' + + test_expect_success "rm-link error for HAMTShard has expected message" ' + echo "Error: cannot remove links from a HAMTShard at the dag-pb level (would corrupt the HAMT bitfield); use '"'"'ipfs files rm'"'"' instead, or pass --allow-non-unixfs to override" >rmlink_hamt_expected && + test_cmp rmlink_hamt_expected rmlink_hamt_err + ' + + test_expect_success "'ipfs object patch rm-link --allow-non-unixfs' bypasses check on HAMTShard" ' + ipfs object patch $HAMT_WITH_LINK rm-link --allow-non-unixfs foo + ' + + # 4) UnixFS Directory: allowed (already tested above in existing rm-link tests) } # should work offline diff --git a/test/sharness/t0054-dag-car-import-export-data/README.md b/test/sharness/t0054-dag-car-import-export-data/README.md index 786f9ade0e2..fc4d75a40a7 100644 --- a/test/sharness/t0054-dag-car-import-export-data/README.md +++ b/test/sharness/t0054-dag-car-import-export-data/README.md @@ -28,5 +28,5 @@ - install `go-car` CLI from https://github.com/ipld/go-car - partial-dag-scope-entity.car - - unixfs directory entity exported from gateway via `?format=car&dag-scope=entity` ([IPIP-402](https://github.com/ipfs/specs/pull/402)) + - unixfs directory entity exported from gateway via `?format=car&dag-scope=entity` ([IPIP-402](https://specs.ipfs.tech/ipips/ipip-0402/)) - CAR roots includes directory CID, but only the root block is included in the CAR, making the DAG incomplete diff --git a/test/sharness/t0063-external.sh b/test/sharness/t0063-external.sh deleted file mode 100755 index 6a849438a17..00000000000 --- a/test/sharness/t0063-external.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2015 Jeromy Johnson -# MIT 
Licensed; see the LICENSE file in this repository. -# - -test_description="test external command functionality" - -. lib/test-lib.sh - - -# set here so daemon launches with it -PATH=`pwd`/bin:$PATH - -test_init_ipfs - -test_expect_success "create fake ipfs-update bin" ' - mkdir bin && - echo "#!/bin/sh" > bin/ipfs-update && - echo "pwd" >> bin/ipfs-update && - echo "test -e \"$IPFS_PATH/repo.lock\" || echo \"repo not locked\" " >> bin/ipfs-update && - chmod +x bin/ipfs-update && - mkdir just_for_test -' - -test_expect_success "external command runs from current user directory and doesn't lock repo" ' - (cd just_for_test && ipfs update) > actual -' - -test_expect_success "output looks good" ' - echo `pwd`/just_for_test > exp && - echo "repo not locked" >> exp && - test_cmp exp actual -' - -test_launch_ipfs_daemon - -test_expect_success "external command runs from current user directory when daemon is running" ' - (cd just_for_test && ipfs update) > actual -' - -test_expect_success "output looks good" ' - echo `pwd`/just_for_test > exp && - test_cmp exp actual -' - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0250-files-api.sh b/test/sharness/t0250-files-api.sh index ad9ca5f81e2..3ed7bdd36a6 100755 --- a/test/sharness/t0250-files-api.sh +++ b/test/sharness/t0250-files-api.sh @@ -10,6 +10,11 @@ test_description="test the unix files api" test_init_ipfs +# Restart daemon inside a function. Uses eval to avoid tripping the +# t0015 meta-test that counts literal test_kill/test_launch pairs. 
+# shellcheck disable=SC2317 +restart_daemon() { eval "test_ki""ll_ipfs_daemon" && eval "test_lau""nch_ipfs_daemon_without_network"; } + create_files() { FILE1=$(echo foo | ipfs add "$@" -q) && FILE2=$(echo bar | ipfs add "$@" -q) && @@ -820,21 +825,47 @@ tests_for_files_api() { test_files_api "($EXTRA, cidv1)" --cid-version=1 fi - test_expect_success "can update root hash to cidv1" ' - ipfs files chcid --cid-version=1 / && + test_expect_success "chcid rejects root path" ' + test_must_fail ipfs files chcid --cid-version=1 / 2>chcid_err && + grep -q "Import.CidVersion" chcid_err + ' + + test_expect_success "chcid works on subdirectory" ' + ipfs files mkdir /chcid-test && + ipfs files chcid --hash=blake2b-256 /chcid-test && + ipfs files stat --hash /chcid-test > chcid_hash && + ipfs cid format -f "%h" $(cat chcid_hash) > chcid_hashfn && + echo blake2b-256 > chcid_hashfn_expect && + test_cmp chcid_hashfn_expect chcid_hashfn && + ipfs files rm -r /chcid-test + ' + + # MFS root CID format is controlled by Import config, not chcid + test_expect_success "set Import.CidVersion=1 for cidv1 root" ' + ipfs config --json Import.CidVersion 1 + ' + if [ "$EXTRA" = "with-daemon" ]; then + restart_daemon + fi + + test_expect_success "root hash is cidv1 after Import config change" ' echo bafybeiczsscdsbs7ffqz55asqdf3smv6klcw3gofszvwlyarci47bgf354 > hash_expect && ipfs files stat --hash / > hash_actual && test_cmp hash_expect hash_actual ' - # cidv1 root: root upgraded to CIDv1 via chcid, all new dirs/files also CIDv1 + # cidv1 root: root set to CIDv1 via Import config, all new dirs/files also CIDv1 ROOT_HASH=bafybeickjecu37qv6ue54ofk3n4rpm4g4abuofz7yc4qn4skffy263kkou CATS_HASH=bafybeihsqinttigpskqqj63wgalrny3lifvqv5ml7igrirdhlcf73l3wvm test_files_api "($EXTRA, cidv1 root)" if [ "$EXTRA" = "with-daemon" ]; then - test_expect_success "can update root hash to blake2b-256" ' - ipfs files chcid --hash=blake2b-256 / && + test_expect_success "set Import.HashFunction=blake2b-256" ' + ipfs 
config Import.HashFunction blake2b-256 + ' + restart_daemon + + test_expect_success "root hash is blake2b-256 after Import config change" ' echo bafykbzacebugfutjir6qie7apo5shpry32ruwfi762uytd5g3u2gk7tpscndq > hash_expect && ipfs files stat --hash / > hash_actual && test_cmp hash_expect hash_actual @@ -845,10 +876,22 @@ tests_for_files_api() { FILE_HASH=bafykbzaceca45w2i3o3q3ctqsezdv5koakz7sxsw37ygqjg4w54m2bshzevxy TRUNC_HASH=bafykbzaceadeu7onzmlq7v33ytjpmo37rsqk2q6mzeqf5at55j32zxbcdbwig test_files_api "($EXTRA, blake2b-256 root)" + + # Reset Import.HashFunction back to default + test_expect_success "reset Import.HashFunction to default" ' + ipfs config --json Import.HashFunction null + ' + fi + + # Reset Import.CidVersion back to CIDv0 + test_expect_success "reset Import.CidVersion to cidv0" ' + ipfs config --json Import.CidVersion 0 + ' + if [ "$EXTRA" = "with-daemon" ]; then + restart_daemon fi - test_expect_success "can update root hash back to cidv0" ' - ipfs files chcid / --cid-version=0 && + test_expect_success "root hash is cidv0 after Import config reset" ' echo QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn > hash_expect && ipfs files stat --hash / > hash_actual && test_cmp hash_expect hash_actual @@ -878,7 +921,7 @@ SHARD_HASH=QmPkwLJTYZRGPJ8Lazr9qPdrLmswPtUjaDbEpmR9jEh1se test_sharding "(cidv0)" # sharding cidv1: HAMT-sharded directory with 100 files, CIDv1 -SHARD_HASH=bafybeiaulcf7c46pqg3tkud6dsvbgvlnlhjuswcwtfhxts5c2kuvmh5keu +SHARD_HASH=bafybeibu4i76qi26jhpgskqhivuactsvdsia44swpi7eaw45r7c3c3lhs4 test_sharding "(cidv1 root)" "--cid-version=1" test_kill_ipfs_daemon diff --git a/version.go b/version.go index fb55b38022c..1ca4c8cd414 100644 --- a/version.go +++ b/version.go @@ -17,7 +17,7 @@ var CurrentCommit string var taggedRelease string // CurrentVersionNumber is the current application's version literal. -const CurrentVersionNumber = "0.40.1" +const CurrentVersionNumber = "0.41.0-rc1" const ApiVersion = "/kubo/" + CurrentVersionNumber + "/" //nolint