diff --git a/.github/actions/build-docker-image/action.yml b/.github/actions/build-docker-image/action.yml index 0001d106ec..efd87d942e 100644 --- a/.github/actions/build-docker-image/action.yml +++ b/.github/actions/build-docker-image/action.yml @@ -50,7 +50,7 @@ runs: - uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0 with: cache-image: false - - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1 + - uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0 - uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0 id: meta diff --git a/.github/actions/cmake/action.yml b/.github/actions/cmake/action.yml index 3dbda8422c..96898d704e 100644 --- a/.github/actions/cmake/action.yml +++ b/.github/actions/cmake/action.yml @@ -37,6 +37,10 @@ inputs: description: Whether to generate Debian package required: true default: "false" + version: + description: Version of the clio_server binary + required: false + default: "" runs: using: composite @@ -57,6 +61,19 @@ runs: STATIC: "${{ inputs.static == 'true' && 'ON' || 'OFF' }}" TIME_TRACE: "${{ inputs.time_trace == 'true' && 'ON' || 'OFF' }}" PACKAGE: "${{ inputs.package == 'true' && 'ON' || 'OFF' }}" + # GitHub creates a merge commit for a PR + # https://www.kenmuse.com/blog/the-many-shas-of-a-github-pull-request/ + # + # We: + # - explicitly provide branch name + # - use `github.head_ref` to get the SHA of last commit in the PR branch + # + # This way it works both for PRs and pushes to branches. + GITHUB_BRANCH_NAME: "${{ github.head_ref || github.ref_name }}" + GITHUB_HEAD_SHA: "${{ github.event.pull_request.head.sha || github.sha }}" + # + # If tag is being pushed, or it's a nightly release, we use that version. 
+ FORCE_CLIO_VERSION: ${{ inputs.version }} run: | cmake \ -B "${BUILD_DIR}" \ diff --git a/.github/scripts/conan/generate_matrix.py b/.github/scripts/conan/generate_matrix.py index 3c18f04192..213ea23d3e 100755 --- a/.github/scripts/conan/generate_matrix.py +++ b/.github/scripts/conan/generate_matrix.py @@ -4,7 +4,7 @@ LINUX_OS = ["heavy", "heavy-arm64"] LINUX_CONTAINERS = [ - '{ "image": "ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f" }' + '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }' ] LINUX_COMPILERS = ["gcc", "clang"] diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 10e64db0c3..937c5522c3 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -49,7 +49,7 @@ jobs: build_type: [Release, Debug] container: [ - '{ "image": "ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f" }', + '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }', ] static: [true] @@ -79,7 +79,7 @@ jobs: uses: ./.github/workflows/reusable-build.yml with: runs_on: heavy - container: '{ "image": "ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f" }' + container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }' conan_profile: gcc build_type: Debug download_ccache: true @@ -97,7 +97,7 @@ jobs: needs: build-and-test runs-on: heavy container: - image: ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f + image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696 steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 diff --git a/.github/workflows/check-libxrpl.yml b/.github/workflows/check-libxrpl.yml index 4e5b744ae9..c18a322fd7 100644 --- a/.github/workflows/check-libxrpl.yml +++ b/.github/workflows/check-libxrpl.yml @@ -21,7 +21,7 @@ jobs: name: Build Clio / `libXRPL ${{ github.event.client_payload.version }}` runs-on: heavy container: - image: 
ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f + image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696 steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 @@ -29,9 +29,9 @@ jobs: fetch-depth: 0 - name: Prepare runner - uses: XRPLF/actions/prepare-runner@2ece4ec6ab7de266859a6f053571425b2bd684b6 + uses: XRPLF/actions/prepare-runner@f05cab7b8541eee6473aa42beb9d2fe35608a190 with: - disable_ccache: true + enable_ccache: false - name: Update libXRPL version requirement run: | @@ -69,7 +69,7 @@ jobs: needs: build runs-on: heavy container: - image: ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f + image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696 steps: - uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 diff --git a/.github/workflows/clang-tidy.yml b/.github/workflows/clang-tidy.yml index db75ab3004..8fc1362fd9 100644 --- a/.github/workflows/clang-tidy.yml +++ b/.github/workflows/clang-tidy.yml @@ -31,7 +31,7 @@ jobs: if: github.event_name != 'push' || contains(github.event.head_commit.message, 'clang-tidy auto fixes') runs-on: heavy container: - image: ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f + image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696 permissions: contents: write @@ -44,9 +44,9 @@ jobs: fetch-depth: 0 - name: Prepare runner - uses: XRPLF/actions/prepare-runner@2ece4ec6ab7de266859a6f053571425b2bd684b6 + uses: XRPLF/actions/prepare-runner@f05cab7b8541eee6473aa42beb9d2fe35608a190 with: - disable_ccache: true + enable_ccache: false - name: Run conan uses: ./.github/actions/conan diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index f35b872bd0..20aae8682c 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -18,7 +18,7 @@ jobs: build: runs-on: ubuntu-latest container: - image: ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f + image: 
ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696 steps: - name: Checkout @@ -27,9 +27,9 @@ jobs: lfs: true - name: Prepare runner - uses: XRPLF/actions/prepare-runner@2ece4ec6ab7de266859a6f053571425b2bd684b6 + uses: XRPLF/actions/prepare-runner@f05cab7b8541eee6473aa42beb9d2fe35608a190 with: - disable_ccache: true + enable_ccache: false - name: Create build directory run: mkdir build_docs diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index b1529190ce..064c5e7ae1 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -28,8 +28,20 @@ defaults: shell: bash jobs: + get_date: + name: Get Date + runs-on: ubuntu-latest + outputs: + date: ${{ steps.get_date.outputs.date }} + steps: + - name: Get current date + id: get_date + run: | + echo "date=$(date +'%Y%m%d')" >> $GITHUB_OUTPUT + build-and-test: name: Build and Test + needs: get_date strategy: fail-fast: false @@ -43,17 +55,17 @@ jobs: conan_profile: gcc build_type: Release static: true - container: '{ "image": "ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f" }' + container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }' - os: heavy conan_profile: gcc build_type: Debug static: true - container: '{ "image": "ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f" }' + container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }' - os: heavy conan_profile: gcc.ubsan build_type: Release static: false - container: '{ "image": "ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f" }' + container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }' uses: ./.github/workflows/reusable-build-test.yml with: @@ -67,14 +79,16 @@ jobs: upload_clio_server: true download_ccache: false upload_ccache: false + version: nightly-${{ needs.get_date.outputs.date }} package: name: Build debian package + needs: get_date uses: 
./.github/workflows/reusable-build.yml with: runs_on: heavy - container: '{ "image": "ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f" }' + container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }' conan_profile: gcc build_type: Release download_ccache: false @@ -83,11 +97,13 @@ jobs: static: true upload_clio_server: false package: true + version: nightly-${{ needs.get_date.outputs.date }} targets: package analyze_build_time: false analyze_build_time: name: Analyze Build Time + needs: get_date strategy: fail-fast: false @@ -95,7 +111,7 @@ jobs: include: - os: heavy conan_profile: clang - container: '{ "image": "ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f" }' + container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }' static: true - os: macos15 conan_profile: apple-clang @@ -114,17 +130,7 @@ jobs: upload_clio_server: false targets: all analyze_build_time: true - - get_date: - name: Get Date - runs-on: ubuntu-latest - outputs: - date: ${{ steps.get_date.outputs.date }} - steps: - - name: Get current date - id: get_date - run: | - echo "date=$(date +'%Y%m%d')" >> $GITHUB_OUTPUT + version: nightly-${{ needs.get_date.outputs.date }} nightly_release: needs: [build-and-test, package, get_date] diff --git a/.github/workflows/pre-commit-autoupdate.yml b/.github/workflows/pre-commit-autoupdate.yml index 0ff638b66b..c054eb7426 100644 --- a/.github/workflows/pre-commit-autoupdate.yml +++ b/.github/workflows/pre-commit-autoupdate.yml @@ -12,7 +12,7 @@ on: jobs: auto-update: - uses: XRPLF/actions/.github/workflows/pre-commit-autoupdate.yml@afbcbdafbe0ce5439492fb87eda6441371086386 + uses: XRPLF/actions/.github/workflows/pre-commit-autoupdate.yml@ad4ab1ae5a54a4bab0e87294c31fc0729f788b2b with: sign_commit: true committer: "Clio CI " diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index c17d2e5740..d600735f0f 100644 --- 
a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -8,7 +8,7 @@ on: jobs: run-hooks: - uses: XRPLF/actions/.github/workflows/pre-commit.yml@34790936fae4c6c751f62ec8c06696f9c1a5753a + uses: XRPLF/actions/.github/workflows/pre-commit.yml@282890f46d6921249d5659dd38babcb0bd8aef48 with: runs_on: heavy - container: '{ "image": "ghcr.io/xrplf/clio-pre-commit:067449c3f8ae6755ea84752ea2962b589fe56c8f" }' + container: '{ "image": "ghcr.io/xrplf/clio-pre-commit:14342e087ceb8b593027198bf9ef06a43833c696" }' diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 158107a423..99bd265346 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -29,7 +29,7 @@ jobs: conan_profile: gcc build_type: Release static: true - container: '{ "image": "ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f" }' + container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }' uses: ./.github/workflows/reusable-build-test.yml with: @@ -43,7 +43,7 @@ jobs: upload_clio_server: true download_ccache: false upload_ccache: false - expected_version: ${{ github.event_name == 'push' && github.ref_name || '' }} + version: ${{ github.event_name == 'push' && github.ref_name || '' }} package: name: Build debian package @@ -51,7 +51,7 @@ jobs: uses: ./.github/workflows/reusable-build.yml with: runs_on: heavy - container: '{ "image": "ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f" }' + container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }' conan_profile: gcc build_type: Release download_ccache: false @@ -60,6 +60,7 @@ jobs: static: true upload_clio_server: false package: true + version: ${{ github.event_name == 'push' && github.ref_name || '' }} targets: package analyze_build_time: false diff --git a/.github/workflows/reusable-build-test.yml b/.github/workflows/reusable-build-test.yml index bf9d82515e..d3cbe543d0 100644 --- 
a/.github/workflows/reusable-build-test.yml +++ b/.github/workflows/reusable-build-test.yml @@ -63,18 +63,18 @@ on: type: string default: all - expected_version: - description: Expected version of the clio_server binary - required: false - type: string - default: "" - package: description: Whether to generate Debian package required: false type: boolean default: false + version: + description: Version of the clio_server binary + required: false + type: string + default: "" + jobs: build: uses: ./.github/workflows/reusable-build.yml @@ -90,8 +90,8 @@ jobs: upload_clio_server: ${{ inputs.upload_clio_server }} targets: ${{ inputs.targets }} analyze_build_time: false - expected_version: ${{ inputs.expected_version }} package: ${{ inputs.package }} + version: ${{ inputs.version }} test: needs: build diff --git a/.github/workflows/reusable-build.yml b/.github/workflows/reusable-build.yml index 4586097e83..46ea0386d0 100644 --- a/.github/workflows/reusable-build.yml +++ b/.github/workflows/reusable-build.yml @@ -60,17 +60,17 @@ on: required: true type: boolean - expected_version: - description: Expected version of the clio_server binary - required: false - type: string - default: "" - package: description: Whether to generate Debian package required: false type: boolean + version: + description: Version of the clio_server binary + required: false + type: string + default: "" + secrets: CODECOV_TOKEN: required: false @@ -93,15 +93,11 @@ jobs: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: fetch-depth: 0 - # We need to fetch tags to have correct version in the release - # The workaround is based on https://github.com/actions/checkout/issues/1467 - fetch-tags: true - ref: ${{ github.ref }} - name: Prepare runner - uses: XRPLF/actions/prepare-runner@2ece4ec6ab7de266859a6f053571425b2bd684b6 + uses: XRPLF/actions/prepare-runner@f05cab7b8541eee6473aa42beb9d2fe35608a190 with: - disable_ccache: ${{ !inputs.download_ccache }} + enable_ccache: ${{ 
inputs.download_ccache }} - name: Setup conan on macOS if: ${{ runner.os == 'macOS' }} @@ -117,7 +113,7 @@ jobs: - name: Restore ccache cache if: ${{ inputs.download_ccache && github.ref != 'refs/heads/develop' }} - uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 + uses: actions/cache/restore@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 with: path: ${{ env.CCACHE_DIR }} key: ${{ steps.cache_key.outputs.key }} @@ -139,6 +135,7 @@ jobs: static: ${{ inputs.static }} time_trace: ${{ inputs.analyze_build_time }} package: ${{ inputs.package }} + version: ${{ inputs.version }} - name: Build Clio uses: ./.github/actions/build-clio @@ -162,12 +159,12 @@ jobs: - name: Show ccache's statistics and zero it if: ${{ inputs.download_ccache }} run: | - ccache --show-stats + ccache --show-stats -vv ccache --zero-stats - name: Save ccache cache if: ${{ inputs.upload_ccache && github.ref == 'refs/heads/develop' }} - uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 + uses: actions/cache/save@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 with: path: ${{ env.CCACHE_DIR }} key: ${{ steps.cache_key.outputs.key }} @@ -218,15 +215,20 @@ jobs: if: ${{ inputs.code_coverage }} uses: ./.github/actions/code-coverage - - name: Verify expected version - if: ${{ inputs.expected_version != '' }} + - name: Verify version is expected + if: ${{ inputs.version != '' }} env: - INPUT_EXPECTED_VERSION: ${{ inputs.expected_version }} + INPUT_VERSION: ${{ inputs.version }} + BUILD_TYPE: ${{ inputs.build_type }} run: | set -e - EXPECTED_VERSION="clio-${INPUT_EXPECTED_VERSION}" - actual_version=$(./build/clio_server --version) - if [[ "$actual_version" != "$EXPECTED_VERSION" ]]; then + EXPECTED_VERSION="clio-${INPUT_VERSION}" + if [[ "${BUILD_TYPE}" == "Debug" ]]; then + EXPECTED_VERSION="${EXPECTED_VERSION}+DEBUG" + fi + + actual_version=$(./build/clio_server --version | head -n 1) + if [[ "${actual_version}" != "${EXPECTED_VERSION}" ]]; then 
echo "Expected version '${EXPECTED_VERSION}', but got '${actual_version}'" exit 1 fi diff --git a/.github/workflows/reusable-release.yml b/.github/workflows/reusable-release.yml index c29e26518c..35dbce4a8d 100644 --- a/.github/workflows/reusable-release.yml +++ b/.github/workflows/reusable-release.yml @@ -46,7 +46,7 @@ jobs: release: runs-on: heavy container: - image: ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f + image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696 env: GH_REPO: ${{ github.repository }} GH_TOKEN: ${{ github.token }} @@ -60,9 +60,9 @@ jobs: fetch-depth: 0 - name: Prepare runner - uses: XRPLF/actions/prepare-runner@2ece4ec6ab7de266859a6f053571425b2bd684b6 + uses: XRPLF/actions/prepare-runner@f05cab7b8541eee6473aa42beb9d2fe35608a190 with: - disable_ccache: true + enable_ccache: false - uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: @@ -91,8 +91,7 @@ jobs: LAST_TAG="$(gh release view --json tagName -q .tagName --repo XRPLF/clio)" LAST_TAG_COMMIT="$(git rev-parse $LAST_TAG)" BASE_COMMIT="$(git merge-base HEAD $LAST_TAG_COMMIT)" - git-cliff "${BASE_COMMIT}..HEAD" --ignore-tags "nightly|-b|-rc" - cat CHANGELOG.md >> "${RUNNER_TEMP}/release_notes.md" + git-cliff "${BASE_COMMIT}..HEAD" --ignore-tags "nightly|-b|-rc" >> "${RUNNER_TEMP}/release_notes.md" - name: Upload release notes uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 diff --git a/.github/workflows/sanitizers.yml b/.github/workflows/sanitizers.yml index fdf58e7bcb..ba397423b5 100644 --- a/.github/workflows/sanitizers.yml +++ b/.github/workflows/sanitizers.yml @@ -44,7 +44,7 @@ jobs: uses: ./.github/workflows/reusable-build-test.yml with: runs_on: heavy - container: '{ "image": "ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f" }' + container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }' download_ccache: false upload_ccache: false 
conan_profile: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }} diff --git a/.github/workflows/update-docker-ci.yml b/.github/workflows/update-docker-ci.yml index 745ac5e0ff..7539c989fe 100644 --- a/.github/workflows/update-docker-ci.yml +++ b/.github/workflows/update-docker-ci.yml @@ -141,7 +141,7 @@ jobs: files: "docker/compilers/gcc/**" - name: Set up Docker Buildx - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1 + uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0 - name: Login to GitHub Container Registry if: ${{ github.event_name != 'pull_request' }} @@ -290,7 +290,7 @@ jobs: files: "docker/tools/**" - name: Set up Docker Buildx - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1 + uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0 - name: Login to GitHub Container Registry if: ${{ github.event_name != 'pull_request' }} diff --git a/.github/workflows/upload-conan-deps.yml b/.github/workflows/upload-conan-deps.yml index 07e05751ea..c980e36c09 100644 --- a/.github/workflows/upload-conan-deps.yml +++ b/.github/workflows/upload-conan-deps.yml @@ -78,9 +78,9 @@ jobs: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Prepare runner - uses: XRPLF/actions/prepare-runner@2ece4ec6ab7de266859a6f053571425b2bd684b6 + uses: XRPLF/actions/prepare-runner@f05cab7b8541eee6473aa42beb9d2fe35608a190 with: - disable_ccache: true + enable_ccache: false - name: Setup conan on macOS if: ${{ runner.os == 'macOS' }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 91fe5bf7ed..3e9f97892a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,12 +29,12 @@ repos: # Autoformat: YAML, JSON, Markdown, etc. 
- repo: https://github.com/rbubley/mirrors-prettier - rev: 3c603eae8faac85303ae675fd33325cff699a797 # frozen: v3.7.3 + rev: 14abee445aea04b39069c19b4bd54efff6775819 # frozen: v3.7.4 hooks: - id: prettier - repo: https://github.com/igorshubovych/markdownlint-cli - rev: c8fd5003603dd6f12447314ecd935ba87c09aff5 # frozen: v0.46.0 + rev: 76b3d32d3f4b965e1d6425253c59407420ae2c43 # frozen: v0.47.0 hooks: - id: markdownlint-fix exclude: LICENSE.md @@ -59,7 +59,7 @@ repos: ] - repo: https://github.com/psf/black-pre-commit-mirror - rev: 2892f1f81088477370d4fbc56545c05d33d2493f # frozen: 25.11.0 + rev: 831207fd435b47aeffdf6af853097e64322b4d44 # frozen: 25.12.0 hooks: - id: black @@ -94,7 +94,7 @@ repos: language: script - repo: https://github.com/pre-commit/mirrors-clang-format - rev: 4c26f99731e7c22a047c35224150ee9e43d7c03e # frozen: v21.1.6 + rev: 75ca4ad908dc4a99f57921f29b7e6c1521e10b26 # frozen: v21.1.8 hooks: - id: clang-format args: [--style=file] diff --git a/benchmarks/rpc/WorkQueueBenchmarks.cpp b/benchmarks/rpc/WorkQueueBenchmarks.cpp index 9b9ce69478..a5128abccd 100644 --- a/benchmarks/rpc/WorkQueueBenchmarks.cpp +++ b/benchmarks/rpc/WorkQueueBenchmarks.cpp @@ -28,19 +28,19 @@ #include "util/prometheus/Prometheus.hpp" #include -#include -#include #include #include -#include #include +#include #include #include #include #include #include #include +#include +#include using namespace rpc; using namespace util::config; @@ -80,36 +80,56 @@ benchmarkWorkQueue(benchmark::State& state) { init(); - auto const total = static_cast(state.range(0)); - auto const numThreads = static_cast(state.range(1)); - auto const maxSize = static_cast(state.range(2)); - auto const delayMs = static_cast(state.range(3)); + auto const wqThreads = static_cast(state.range(0)); + auto const maxQueueSize = static_cast(state.range(1)); + auto const clientThreads = static_cast(state.range(2)); + auto const itemsPerClient = static_cast(state.range(3)); + auto const clientProcessingMs = 
static_cast(state.range(4)); for (auto _ : state) { std::atomic_size_t totalExecuted = 0uz; std::atomic_size_t totalQueued = 0uz; state.PauseTiming(); - WorkQueue queue(numThreads, maxSize); + WorkQueue queue(wqThreads, maxQueueSize); state.ResumeTiming(); - for (auto i = 0uz; i < total; ++i) { - totalQueued += static_cast(queue.postCoro( - [&delayMs, &totalExecuted](auto yield) { - ++totalExecuted; - - boost::asio::steady_timer timer(yield.get_executor(), std::chrono::milliseconds{delayMs}); - timer.async_wait(yield); - }, - /* isWhiteListed = */ false - )); + std::vector threads; + threads.reserve(clientThreads); + + for (auto t = 0uz; t < clientThreads; ++t) { + threads.emplace_back([&] { + for (auto i = 0uz; i < itemsPerClient; ++i) { + totalQueued += static_cast(queue.postCoro( + [&clientProcessingMs, &totalExecuted](auto yield) { + ++totalExecuted; + + boost::asio::steady_timer timer( + yield.get_executor(), std::chrono::milliseconds{clientProcessingMs} + ); + timer.async_wait(yield); + + std::this_thread::sleep_for(std::chrono::microseconds{10}); + }, + /* isWhiteListed = */ false + )); + } + }); } + for (auto& t : threads) + t.join(); + queue.stop(); ASSERT(totalExecuted == totalQueued, "Totals don't match"); - ASSERT(totalQueued <= total, "Queued more than requested"); - ASSERT(totalQueued >= maxSize, "Queued less than maxSize"); + ASSERT(totalQueued <= itemsPerClient * clientThreads, "Queued more than requested"); + + if (maxQueueSize == 0) { + ASSERT(totalQueued == itemsPerClient * clientThreads, "Queued exactly the expected amount"); + } else { + ASSERT(totalQueued >= std::min(maxQueueSize, itemsPerClient * clientThreads), "Queued less than expected"); + } } } @@ -123,5 +143,5 @@ benchmarkWorkQueue(benchmark::State& state) */ // TODO: figure out what happens on 1 thread BENCHMARK(benchmarkWorkQueue) - ->ArgsProduct({{1'000, 10'000, 100'000}, {2, 4, 8}, {0, 5'000}, {10, 100, 250}}) + ->ArgsProduct({{2, 4, 8, 16}, {0, 5'000}, {4, 8, 16}, {1'000, 10'000}, 
{10, 100, 250}}) ->Unit(benchmark::kMillisecond); diff --git a/cliff.toml b/cliff.toml index 755a52476d..974da791a0 100644 --- a/cliff.toml +++ b/cliff.toml @@ -49,8 +49,6 @@ postprocessors = [ ] # render body even when there are no releases to process # render_always = true -# output file path -output = "CHANGELOG.md" [git] # parse the commits based on https://www.conventionalcommits.org diff --git a/cmake/ClioVersion.cmake b/cmake/ClioVersion.cmake index ba3b9b7a2b..939a0b5f4d 100644 --- a/cmake/ClioVersion.cmake +++ b/cmake/ClioVersion.cmake @@ -1,42 +1,42 @@ find_package(Git REQUIRED) -set(GIT_COMMAND describe --tags --exact-match) -execute_process( - COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} - OUTPUT_VARIABLE TAG - RESULT_VARIABLE RC - ERROR_VARIABLE ERR - OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_STRIP_TRAILING_WHITESPACE -) - -if (RC EQUAL 0) - message(STATUS "Found tag '${TAG}' in git. Will use it as Clio version") - set(CLIO_VERSION "${TAG}") - set(DOC_CLIO_VERSION "${TAG}") +if (DEFINED ENV{GITHUB_BRANCH_NAME}) + set(GIT_BUILD_BRANCH $ENV{GITHUB_BRANCH_NAME}) + set(GIT_COMMIT_HASH $ENV{GITHUB_HEAD_SHA}) else () - message(STATUS "Error finding tag in git: ${ERR}") - message(STATUS "Will use 'YYYYMMDDHMS--' as Clio version") - - set(GIT_COMMAND show -s --date=format:%Y%m%d%H%M%S --format=%cd) - execute_process( - COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE DATE - OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY - ) - set(GIT_COMMAND branch --show-current) execute_process( - COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE BRANCH + COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE GIT_BUILD_BRANCH OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY ) - set(GIT_COMMAND rev-parse --short HEAD) + set(GIT_COMMAND rev-parse HEAD) execute_process( - COMMAND 
${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE REV + COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE GIT_COMMIT_HASH OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY ) +endif () + +execute_process( + COMMAND date +%Y%m%d%H%M%S WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE BUILD_DATE + OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY +) + +message(STATUS "Git branch: ${GIT_BUILD_BRANCH}") +message(STATUS "Git commit hash: ${GIT_COMMIT_HASH}") +message(STATUS "Build date: ${BUILD_DATE}") + +if (DEFINED ENV{FORCE_CLIO_VERSION} AND NOT "$ENV{FORCE_CLIO_VERSION}" STREQUAL "") + message(STATUS "Using explicitly provided '$ENV{FORCE_CLIO_VERSION}' as Clio version") + + set(CLIO_VERSION "$ENV{FORCE_CLIO_VERSION}") + set(DOC_CLIO_VERSION "$ENV{FORCE_CLIO_VERSION}") +else () + message(STATUS "Using 'YYYYMMDDHMS--' as Clio version") + + string(SUBSTRING ${GIT_COMMIT_HASH} 0 7 GIT_COMMIT_HASH_SHORT) - set(CLIO_VERSION "${DATE}-${BRANCH}-${REV}") + set(CLIO_VERSION "${BUILD_DATE}-${GIT_BUILD_BRANCH}-${GIT_COMMIT_HASH_SHORT}") + set(DOC_CLIO_VERSION "develop") endif () diff --git a/conan.lock b/conan.lock index cb5ca49113..3bbf1eee4d 100644 --- a/conan.lock +++ b/conan.lock @@ -1,42 +1,42 @@ { "version": "0.5", "requires": [ - "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497", - "xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1756234289.683", + "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1765850150.075", + "xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1765850149.987", "xrpl/3.0.0#534d3f65a336109eee929b88962bae4e%1765375071.547", - "sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1756234266.869", - "spdlog/1.16.0#942c2c39562ae25ba575d9c8e2bdf3b6%1763984117.108", - "soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1756234262.318", - "re2/20230301#ca3b241baec15bd31ea9187150e0b333%1764175362.029", + "sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1765850149.926", 
"spdlog/1.17.0#bcbaaf7147bda6ad24ffbd1ac3d7142c%1767636069.964", + "soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1765850149.46", + "re2/20230301#ca3b241baec15bd31ea9187150e0b333%1765850148.103", "rapidjson/cci.20220822#1b9d8c2256876a154172dc5cfbe447c6%1754325007.656", "protobuf/3.21.12#44ee56c0a6eea0c19aeeaca680370b88%1764175361.456", "openssl/1.1.1w#a8f0792d7c5121b954578a7149d23e03%1756223730.729", - "nudb/2.0.9#fb8dfd1a5557f5e0528114c2da17721e%1763150366.909", + "nudb/2.0.9#fb8dfd1a5557f5e0528114c2da17721e%1765850143.957", "minizip/1.2.13#9e87d57804bd372d6d1e32b1871517a3%1754325004.374", - "lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1756234228.999", + "lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1765850143.914", "libuv/1.46.0#dc28c1f653fa197f00db5b577a6f6011%1754325003.592", - "libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1756223727.64", - "libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1756230911.03", - "libarchive/3.8.1#ffee18995c706e02bf96e7a2f7042e0d%1764175360.142", + "libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1765842973.492", + "libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1765842973.03", + "libarchive/3.8.1#ffee18995c706e02bf96e7a2f7042e0d%1765850144.736", "http_parser/2.9.4#98d91690d6fd021e9e624218a85d9d97%1754325001.385", - "gtest/1.14.0#f8f0757a574a8dd747d16af62d6eb1b7%1754325000.842", + "gtest/1.17.0#5224b3b3ff3b4ce1133cbdd27d53ee7d%1755784855.585", "grpc/1.50.1#02291451d1e17200293a409410d1c4e1%1756234248.958", "fmt/12.1.0#50abab23274d56bb8f42c94b3b9a40c7%1763984116.926", "doctest/2.4.11#a4211dfc329a16ba9f280f9574025659%1756234220.819", - "date/3.0.4#862e11e80030356b53c2c38599ceb32b%1763584497.32", + "date/3.0.4#862e11e80030356b53c2c38599ceb32b%1765850143.772", "cassandra-cpp-driver/2.17.0#bd3934138689482102c265d01288a316%1764175359.611", - "c-ares/1.34.5#5581c2b62a608b40bb85d965ab3ec7c8%1764175359.429", - "bzip2/1.0.8#c470882369c2d95c5c77e970c0c7e321%1764175359.429", + 
"c-ares/1.34.5#5581c2b62a608b40bb85d965ab3ec7c8%1765850144.336", + "bzip2/1.0.8#c470882369c2d95c5c77e970c0c7e321%1765850143.837", "boost/1.83.0#91d8b1572534d2c334d6790e3c34d0c1%1764175359.61", "benchmark/1.9.4#ce4403f7a24d3e1f907cd9da4b678be4%1754578869.672", "abseil/20230802.1#90ba607d4ee8fb5fb157c3db540671fc%1764175359.429" ], "build_requires": [ - "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497", + "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1765850150.075", "protobuf/3.21.12#44ee56c0a6eea0c19aeeaca680370b88%1764175361.456", - "cmake/4.2.0#ae0a44f44a1ef9ab68fd4b3e9a1f8671%1764175359.44", - "cmake/3.31.10#313d16a1aa16bbdb2ca0792467214b76%1764175359.429", - "b2/5.3.3#107c15377719889654eb9a162a673975%1756234226.28" + "cmake/4.2.0#ae0a44f44a1ef9ab68fd4b3e9a1f8671%1765850153.937", + "cmake/3.31.10#313d16a1aa16bbdb2ca0792467214b76%1765850153.479", + "b2/5.3.3#107c15377719889654eb9a162a673975%1765850144.355" ], "python_requires": [], "overrides": { @@ -53,9 +53,6 @@ ], "sqlite3/3.44.2": [ "sqlite3/3.49.1" - ], - "fmt/12.0.0": [ - "fmt/12.1.0" ] }, "config_requires": [] diff --git a/conanfile.py b/conanfile.py index de5a450527..f06e5d0183 100644 --- a/conanfile.py +++ b/conanfile.py @@ -14,37 +14,37 @@ class ClioConan(ConanFile): requires = [ "boost/1.83.0", "cassandra-cpp-driver/2.17.0", - "protobuf/3.21.12", + "fmt/12.1.0", "grpc/1.50.1", + "libbacktrace/cci.20210118", "openssl/1.1.1w", + "protobuf/3.21.12", + "spdlog/1.17.0", "xrpl/3.0.0", "zlib/1.3.1", - "libbacktrace/cci.20210118", - "spdlog/1.16.0", ] default_options = { - "xrpl/*:tests": False, - "xrpl/*:rocksdb": False, "cassandra-cpp-driver/*:shared": False, "date/*:header_only": True, - "grpc/*:shared": False, "grpc/*:secure": True, + "grpc/*:shared": False, + "gtest/*:no_main": True, "libpq/*:shared": False, "lz4/*:shared": False, "openssl/*:shared": False, "protobuf/*:shared": False, "protobuf/*:with_zlib": True, "snappy/*:shared": False, - "gtest/*:no_main": True, + "xrpl/*:rocksdb": False, + 
"xrpl/*:tests": False, } exports_sources = ("CMakeLists.txt", "cmake/*", "src/*") def requirements(self): - self.requires("gtest/1.14.0") + self.requires("gtest/1.17.0") self.requires("benchmark/1.9.4") - self.requires("fmt/12.1.0", force=True) def configure(self): if self.settings.compiler == "apple-clang": diff --git a/docker/ci/Dockerfile b/docker/ci/Dockerfile index 6d5c5caf0f..9c55f7f297 100644 --- a/docker/ci/Dockerfile +++ b/docker/ci/Dockerfile @@ -54,7 +54,7 @@ RUN pip install -q --no-cache-dir \ # lxml 6.0.0 is not compatible with our image 'lxml<6.0.0' \ cmake \ - conan==2.22.1 \ + conan==2.24.0 \ gcovr \ # We're adding pre-commit to this image as well, # because clang-tidy workflow requires it diff --git a/docker/ci/README.md b/docker/ci/README.md index d899afab08..0bf5599919 100644 --- a/docker/ci/README.md +++ b/docker/ci/README.md @@ -5,15 +5,15 @@ It is used in [Clio Github Actions](https://github.com/XRPLF/clio/actions) but c The image is based on Ubuntu 20.04 and contains: -- ccache 4.12.1 +- ccache 4.12.2 - Clang 19 - ClangBuildAnalyzer 1.6.0 -- Conan 2.22.1 -- Doxygen 1.15.0 +- Conan 2.24.0 +- Doxygen 1.16.1 - GCC 15.2.0 -- GDB 16.3 -- gh 2.82.1 -- git-cliff 2.10.1 +- GDB 17.1 +- gh 2.83.2 +- git-cliff 2.11.0 - mold 2.40.4 - Ninja 1.13.2 - Python 3.8 diff --git a/docker/develop/compose.yaml b/docker/develop/compose.yaml index 35443fece0..bd18473d8b 100644 --- a/docker/develop/compose.yaml +++ b/docker/develop/compose.yaml @@ -1,6 +1,6 @@ services: clio_develop: - image: ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f + image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696 volumes: - clio_develop_conan_data:/root/.conan2/p - clio_develop_ccache:/root/.ccache diff --git a/docker/tools/Dockerfile b/docker/tools/Dockerfile index 71f09000cf..16196ea271 100644 --- a/docker/tools/Dockerfile +++ b/docker/tools/Dockerfile @@ -42,7 +42,7 @@ RUN wget --progress=dot:giga "https://github.com/rui314/mold/archive/refs/tags/v && 
ninja install \ && rm -rf /tmp/* /var/tmp/* -ARG CCACHE_VERSION=4.12.1 +ARG CCACHE_VERSION=4.12.2 RUN wget --progress=dot:giga "https://github.com/ccache/ccache/releases/download/v${CCACHE_VERSION}/ccache-${CCACHE_VERSION}.tar.gz" \ && tar xf "ccache-${CCACHE_VERSION}.tar.gz" \ && cd "ccache-${CCACHE_VERSION}" \ @@ -59,7 +59,7 @@ RUN apt-get update \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -ARG DOXYGEN_VERSION=1.15.0 +ARG DOXYGEN_VERSION=1.16.1 RUN wget --progress=dot:giga "https://github.com/doxygen/doxygen/releases/download/Release_${DOXYGEN_VERSION//./_}/doxygen-${DOXYGEN_VERSION}.src.tar.gz" \ && tar xf "doxygen-${DOXYGEN_VERSION}.src.tar.gz" \ && cd "doxygen-${DOXYGEN_VERSION}" \ @@ -79,13 +79,13 @@ RUN wget --progress=dot:giga "https://github.com/aras-p/ClangBuildAnalyzer/archi && ninja install \ && rm -rf /tmp/* /var/tmp/* -ARG GIT_CLIFF_VERSION=2.10.1 +ARG GIT_CLIFF_VERSION=2.11.0 RUN wget --progress=dot:giga "https://github.com/orhun/git-cliff/releases/download/v${GIT_CLIFF_VERSION}/git-cliff-${GIT_CLIFF_VERSION}-x86_64-unknown-linux-musl.tar.gz" \ && tar xf git-cliff-${GIT_CLIFF_VERSION}-x86_64-unknown-linux-musl.tar.gz \ && mv git-cliff-${GIT_CLIFF_VERSION}/git-cliff /usr/local/bin/git-cliff \ && rm -rf /tmp/* /var/tmp/* -ARG GH_VERSION=2.82.1 +ARG GH_VERSION=2.83.2 RUN wget --progress=dot:giga "https://github.com/cli/cli/releases/download/v${GH_VERSION}/gh_${GH_VERSION}_linux_${TARGETARCH}.tar.gz" \ && tar xf gh_${GH_VERSION}_linux_${TARGETARCH}.tar.gz \ && mv gh_${GH_VERSION}_linux_${TARGETARCH}/bin/gh /usr/local/bin/gh \ @@ -100,7 +100,7 @@ RUN apt-get update \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -ARG GDB_VERSION=16.3 +ARG GDB_VERSION=17.1 RUN wget --progress=dot:giga "https://sourceware.org/pub/gdb/releases/gdb-${GDB_VERSION}.tar.gz" \ && tar xf "gdb-${GDB_VERSION}.tar.gz" \ && cd "gdb-${GDB_VERSION}" \ diff --git a/docs/build-clio.md b/docs/build-clio.md index 791bc49d37..81a43e0067 100644 --- a/docs/build-clio.md +++ 
b/docs/build-clio.md @@ -175,7 +175,7 @@ Open the `index.html` file in your browser to see the documentation pages. It is also possible to build Clio using [Docker](https://www.docker.com/) if you don't want to install all the dependencies on your machine. ```sh -docker run -it ghcr.io/xrplf/clio-ci:067449c3f8ae6755ea84752ea2962b589fe56c8f +docker run -it ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696 git clone https://github.com/XRPLF/clio cd clio ``` diff --git a/docs/config-description.md b/docs/config-description.md index e4a3f57fb4..9c6516474a 100644 --- a/docs/config-description.md +++ b/docs/config-description.md @@ -457,6 +457,14 @@ This document provides a list of all available Clio configuration properties in - **Constraints**: None - **Description**: Max allowed difference between the latest sequence in DB and in cache file. If the cache file is too old (contains too low latest sequence) Clio will reject using it. +### cache.file.async_save + +- **Required**: True +- **Type**: boolean +- **Default value**: `False` +- **Constraints**: None +- **Description**: When false, Clio waits for cache saving to finish before shutting down. When true, cache saving runs in parallel with other shutdown operations. 
+ ### log.channels.[].channel - **Required**: False diff --git a/src/app/CliArgs.cpp b/src/app/CliArgs.cpp index 024b17f42a..45ae424043 100644 --- a/src/app/CliArgs.cpp +++ b/src/app/CliArgs.cpp @@ -77,7 +77,10 @@ CliArgs::parse(int argc, char const* argv[]) } if (parsed.contains("version")) { - std::cout << util::build::getClioFullVersionString() << '\n'; + std::cout << util::build::getClioFullVersionString() << '\n' + << "Git commit hash: " << util::build::getGitCommitHash() << '\n' + << "Git build branch: " << util::build::getGitBuildBranch() << '\n' + << "Build date: " << util::build::getBuildDate() << '\n'; return Action{Action::Exit{EXIT_SUCCESS}}; } diff --git a/src/data/AmendmentCenter.cpp b/src/data/AmendmentCenter.cpp index eb7c14eae9..4863ff0e57 100644 --- a/src/data/AmendmentCenter.cpp +++ b/src/data/AmendmentCenter.cpp @@ -146,9 +146,12 @@ AmendmentCenter::isEnabled(AmendmentKey const& key, uint32_t seq) const bool AmendmentCenter::isEnabled(boost::asio::yield_context yield, AmendmentKey const& key, uint32_t seq) const { - if (auto const listAmendments = fetchAmendmentsList(yield, seq); listAmendments) - return lookupAmendment(all_, *listAmendments, key); - + try { + if (auto const listAmendments = fetchAmendmentsList(yield, seq); listAmendments) + return lookupAmendment(all_, *listAmendments, key); + } catch (std::runtime_error const&) { + return false; // Some old ledger does not contain Amendments ledger object so do best we can for now + } return false; } @@ -157,13 +160,19 @@ AmendmentCenter::isEnabled(boost::asio::yield_context yield, std::vector out; - rg::transform(keys, std::back_inserter(out), [this, &listAmendments](auto const& key) { - return lookupAmendment(all_, *listAmendments, key); - }); - - return out; + try { + if (auto const listAmendments = fetchAmendmentsList(yield, seq); listAmendments) { + std::vector out; + rg::transform(keys, std::back_inserter(out), [this, &listAmendments](auto const& key) { + return lookupAmendment(all_, 
*listAmendments, key); + }); + + return out; + } + } catch (std::runtime_error const&) { + return std::vector( + keys.size(), false + ); // Some old ledger does not contain Amendments ledger object so do best we can for now } return std::vector(keys.size(), false); diff --git a/src/data/LedgerCacheSaver.cpp b/src/data/LedgerCacheSaver.cpp index 0ff2390523..3d4d27e42f 100644 --- a/src/data/LedgerCacheSaver.cpp +++ b/src/data/LedgerCacheSaver.cpp @@ -30,7 +30,9 @@ namespace data { LedgerCacheSaver::LedgerCacheSaver(util::config::ClioConfigDefinition const& config, LedgerCacheInterface const& cache) - : cacheFilePath_(config.maybeValue("cache.file.path")), cache_(cache) + : cacheFilePath_(config.maybeValue("cache.file.path")) + , cache_(cache) + , isAsync_(config.get("cache.file.async_save")) { } @@ -56,6 +58,9 @@ LedgerCacheSaver::save() LOG(util::LogService::error()) << "Error saving LedgerCache to file: " << success.error(); } }); + if (not isAsync_) { + waitToFinish(); + } } void diff --git a/src/data/LedgerCacheSaver.hpp b/src/data/LedgerCacheSaver.hpp index 7cf68f1dd3..9d21ea4a87 100644 --- a/src/data/LedgerCacheSaver.hpp +++ b/src/data/LedgerCacheSaver.hpp @@ -53,6 +53,7 @@ class LedgerCacheSaver { std::optional cacheFilePath_; std::reference_wrapper cache_; std::optional savingThread_; + bool isAsync_; public: /** diff --git a/src/rpc/WorkQueue.cpp b/src/rpc/WorkQueue.cpp index b676fc64bd..a0a9688238 100644 --- a/src/rpc/WorkQueue.cpp +++ b/src/rpc/WorkQueue.cpp @@ -25,9 +25,7 @@ #include "util/prometheus/Label.hpp" #include "util/prometheus/Prometheus.hpp" -#include #include -#include #include #include @@ -39,6 +37,27 @@ namespace rpc { +void +WorkQueue::OneTimeCallable::setCallable(std::function func) +{ + func_ = std::move(func); +} + +void +WorkQueue::OneTimeCallable::operator()() +{ + if (not called_) { + func_(); + called_ = true; + } +} + +WorkQueue::OneTimeCallable:: +operator bool() const +{ + return func_.operator bool(); +} + 
WorkQueue::WorkQueue(DontStartProcessingTag, std::uint32_t numWorkers, uint32_t maxSize) : queued_{PrometheusService::counterInt( "work_queue_queued_total_number", @@ -56,8 +75,6 @@ WorkQueue::WorkQueue(DontStartProcessingTag, std::uint32_t numWorkers, uint32_t "The current number of tasks in the queue" )} , ioc_{numWorkers} - , strand_{ioc_.get_executor()} - , waitTimer_(ioc_) { if (maxSize != 0) maxSize_ = maxSize; @@ -77,12 +94,14 @@ WorkQueue::~WorkQueue() void WorkQueue::startProcessing() { - util::spawn(strand_, [this](auto yield) { - ASSERT(not hasDispatcher_, "Dispatcher already running"); + ASSERT(not processingStarted_, "Attempt to start processing work queue more than once"); + processingStarted_ = true; - hasDispatcher_ = true; - dispatcherLoop(yield); - }); + // Spawn workers for all tasks that were queued before processing started + auto const numTasks = size(); + for (auto i = 0uz; i < numTasks; ++i) { + util::spawn(ioc_, [this](auto yield) { executeTask(yield); }); + } } bool @@ -98,93 +117,28 @@ WorkQueue::postCoro(TaskType func, bool isWhiteListed, Priority priority) return false; } - ++curSize_.get(); - auto needsWakeup = false; - { - auto state = dispatcherState_.lock(); - - needsWakeup = std::exchange(state->isIdle, false); - + auto state = queueState_.lock(); state->push(priority, std::move(func)); } - if (needsWakeup) - boost::asio::post(strand_, [this] { waitTimer_.cancel(); }); - - return true; -} - -void -WorkQueue::dispatcherLoop(boost::asio::yield_context yield) -{ - LOG(log_.info()) << "WorkQueue dispatcher starting"; - - // all ongoing tasks must be completed before stopping fully - while (not stopping_ or size() > 0) { - std::optional task; - - { - auto state = dispatcherState_.lock(); - - if (state->empty()) { - state->isIdle = true; - } else { - task = state->popNext(); - } - } - - if (not stopping_ and not task.has_value()) { - waitTimer_.expires_at(std::chrono::steady_clock::time_point::max()); - boost::system::error_code ec; - 
waitTimer_.async_wait(yield[ec]); - } else if (task.has_value()) { - util::spawn( - ioc_, - [this, spawnedAt = std::chrono::system_clock::now(), task = std::move(*task)](auto yield) mutable { - auto const takenAt = std::chrono::system_clock::now(); - auto const waited = - std::chrono::duration_cast(takenAt - spawnedAt).count(); - - ++queued_.get(); - durationUs_.get() += waited; - LOG(log_.info()) << "WorkQueue wait time: " << waited << ", queue size: " << size(); - - task(yield); - - --curSize_.get(); - } - ); - } - } + ++curSize_.get(); - LOG(log_.info()) << "WorkQueue dispatcher shutdown requested - time to execute onTasksComplete"; + if (not processingStarted_) + return true; - { - auto onTasksComplete = onQueueEmpty_.lock(); - ASSERT(onTasksComplete->operator bool(), "onTasksComplete must be set when stopping is true."); - onTasksComplete->operator()(); - } + util::spawn(ioc_, [this](auto yield) { executeTask(yield); }); - LOG(log_.info()) << "WorkQueue dispatcher finished"; + return true; } void WorkQueue::requestStop(std::function onQueueEmpty) { auto handler = onQueueEmpty_.lock(); - *handler = std::move(onQueueEmpty); + handler->setCallable(std::move(onQueueEmpty)); stopping_ = true; - auto needsWakeup = false; - - { - auto state = dispatcherState_.lock(); - needsWakeup = std::exchange(state->isIdle, false); - } - - if (needsWakeup) - boost::asio::post(strand_, [this] { waitTimer_.cancel(); }); } void @@ -194,6 +148,12 @@ WorkQueue::stop() requestStop(); ioc_.join(); + + { + auto onTasksComplete = onQueueEmpty_.lock(); + ASSERT(onTasksComplete->operator bool(), "onTasksComplete must be set when stopping is true."); + onTasksComplete->operator()(); + } } WorkQueue @@ -227,4 +187,29 @@ WorkQueue::size() const return curSize_.get().value(); } +void +WorkQueue::executeTask(boost::asio::yield_context yield) +{ + std::optional taskWithTimestamp; + { + auto state = queueState_.lock(); + taskWithTimestamp = state->popNext(); + } + + ASSERT( + 
taskWithTimestamp.has_value(), + "Queue should not be empty as we spawn a coro with executeTask for each postCoro." + ); + auto const takenAt = std::chrono::system_clock::now(); + auto const waited = + std::chrono::duration_cast(takenAt - taskWithTimestamp->queuedAt).count(); + + ++queued_.get(); + durationUs_.get() += waited; + LOG(log_.info()) << "WorkQueue wait time: " << waited << ", queue size: " << size(); + + taskWithTimestamp->task(yield); + --curSize_.get(); +} + } // namespace rpc diff --git a/src/rpc/WorkQueue.hpp b/src/rpc/WorkQueue.hpp index 8fa466501f..30ea4d7b38 100644 --- a/src/rpc/WorkQueue.hpp +++ b/src/rpc/WorkQueue.hpp @@ -25,15 +25,12 @@ #include "util/prometheus/Counter.hpp" #include "util/prometheus/Gauge.hpp" -#include #include -#include -#include #include -#include #include #include +#include #include #include #include @@ -64,7 +61,13 @@ struct Reportable { */ class WorkQueue : public Reportable { using TaskType = std::function; - using QueueType = std::queue; + + struct TaskWithTimestamp { + TaskType task; + std::chrono::system_clock::time_point queuedAt; + }; + + using QueueType = std::queue; public: /** @@ -76,22 +79,21 @@ class WorkQueue : public Reportable { }; private: - struct DispatcherState { + struct QueueState { QueueType high; QueueType normal; - bool isIdle = false; size_t highPriorityCounter = 0; void - push(Priority priority, auto&& task) + push(Priority priority, TaskType&& task) { auto& queue = [this, priority] -> QueueType& { if (priority == Priority::High) return high; return normal; }(); - queue.push(std::forward(task)); + queue.push(TaskWithTimestamp{.task = std::move(task), .queuedAt = std::chrono::system_clock::now()}); } [[nodiscard]] bool @@ -100,21 +102,21 @@ class WorkQueue : public Reportable { return high.empty() and normal.empty(); } - [[nodiscard]] std::optional + [[nodiscard]] std::optional popNext() { if (not high.empty() and (highPriorityCounter < kTAKE_HIGH_PRIO or normal.empty())) { - auto task = 
std::move(high.front()); + auto taskWithTimestamp = std::move(high.front()); high.pop(); ++highPriorityCounter; - return task; + return taskWithTimestamp; } if (not normal.empty()) { - auto task = std::move(normal.front()); + auto taskWithTimestamp = std::move(normal.front()); normal.pop(); highPriorityCounter = 0; - return task; + return taskWithTimestamp; } return std::nullopt; @@ -133,14 +135,26 @@ class WorkQueue : public Reportable { util::Logger log_{"RPC"}; boost::asio::thread_pool ioc_; - boost::asio::strand strand_; - bool hasDispatcher_ = false; std::atomic_bool stopping_; + std::atomic_bool processingStarted_{false}; - util::Mutex> onQueueEmpty_; - util::Mutex dispatcherState_; - boost::asio::steady_timer waitTimer_; + class OneTimeCallable { + std::function func_; + bool called_{false}; + + public: + void + setCallable(std::function func); + + void + operator()(); + + explicit + operator bool() const; + }; + util::Mutex onQueueEmpty_; + util::Mutex queueState_; public: struct DontStartProcessingTag {}; @@ -234,7 +248,7 @@ class WorkQueue : public Reportable { private: void - dispatcherLoop(boost::asio::yield_context yield); + executeTask(boost::asio::yield_context yield); }; } // namespace rpc diff --git a/src/util/Assert.cpp b/src/util/Assert.cpp index 87165d607b..94f415951d 100644 --- a/src/util/Assert.cpp +++ b/src/util/Assert.cpp @@ -54,10 +54,10 @@ OnAssert::resetAction() void OnAssert::defaultAction(std::string_view message) { - if (LogServiceState::initialized()) { + if (LogServiceState::initialized() and LogServiceState::hasSinks()) { LOG(LogService::fatal()) << message; } else { - std::cerr << message; + std::cerr << message << std::endl; } std::exit(EXIT_FAILURE); // std::abort does not flush gcovr output and causes uncovered lines } diff --git a/src/util/Channel.hpp b/src/util/Channel.hpp new file mode 100644 index 0000000000..9b6b6c8edb --- /dev/null +++ b/src/util/Channel.hpp @@ -0,0 +1,382 @@ 
+//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2025, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace util { + +#ifdef __clang__ +namespace detail { +// Forward declaration for compile-time check +template +struct ChannelInstantiated; +} // namespace detail +#endif + +/** + * @brief Represents a go-like channel, a multi-producer (Sender) multi-consumer (Receiver) thread-safe data pipe. + * @note Use INSTANTIATE_CHANNEL_FOR_CLANG macro when using this class. See docs at the bottom of the file for more + * details. 
+ * + * @tparam T The type of data the channel transfers + */ +template +class Channel { +private: + class ControlBlock { + using InternalChannelType = boost::asio::experimental::concurrent_channel; + boost::asio::any_io_executor executor_; + InternalChannelType ch_; + + public: + ControlBlock(auto&& context, std::size_t capacity) : executor_(context.get_executor()), ch_(context, capacity) + { + } + + [[nodiscard]] InternalChannelType& + channel() + { + return ch_; + } + + void + close() + { + if (not isClosed()) { + ch_.close(); + // Workaround for Boost bug: close() alone doesn't cancel pending async operations. + // We must call cancel() to unblock them. The bug also causes cancel() to return + // error_code 0 instead of channel_cancelled, so async operations must check + // isClosed() to detect this case. + // https://github.com/chriskohlhoff/asio/issues/1575 + ch_.cancel(); + } + } + + [[nodiscard]] bool + isClosed() const + { + return not ch_.is_open(); + } + }; + + /** + * @brief This is used to close the channel once either all Senders or all Receivers are destroyed + */ + struct Guard { + std::shared_ptr shared; + + ~Guard() + { + shared->close(); + } + }; + + /** + * @brief The sending end of a channel. + * + * Sender is copyable and movable. The channel remains open as long as at least one Sender exists. + * When all Sender instances are destroyed, the channel is closed and receivers will receive std::nullopt. + */ + class Sender { + std::shared_ptr shared_; + std::shared_ptr guard_; + + public: + /** + * @brief Constructs a Sender from a shared control block. 
+ * @param shared The shared control block managing the channel state + */ + Sender(std::shared_ptr shared) + : shared_(std::move(shared)), guard_(std::make_shared(shared_)) {}; + + Sender(Sender&&) = default; + Sender(Sender const&) = default; + Sender& + operator=(Sender&&) = default; + Sender& + operator=(Sender const&) = default; + + /** + * @brief Asynchronously sends data through the channel using a coroutine. + * + * Blocks the coroutine until the data is sent or the channel is closed. + * + * @tparam D The type of data to send (must be convertible to T) + * @param data The data to send + * @param yield The Boost.Asio yield context for coroutine suspension + * @return true if the data was sent successfully, false if the channel is closed + */ + template + bool + asyncSend(D&& data, boost::asio::yield_context yield) + requires(std::convertible_to, std::remove_cvref_t>) + { + boost::system::error_code const ecIn; + boost::system::error_code ecOut; + shared_->channel().async_send(ecIn, std::forward(data), yield[ecOut]); + + // Workaround: asio channels bug returns ec=0 on cancel, check isClosed() instead + if (not ecOut and shared_->isClosed()) + return false; + + return not ecOut; + } + + /** + * @brief Asynchronously sends data through the channel using a callback. + * + * The callback is invoked when the send operation completes. 
+ * + * @tparam D The type of data to send (must be convertible to T) + * @param data The data to send + * @param fn Callback function invoked with true if successful, false if the channel is closed + */ + template + void + asyncSend(D&& data, std::invocable auto&& fn) + requires(std::convertible_to, std::remove_cvref_t>) + { + boost::system::error_code const ecIn; + shared_->channel().async_send( + ecIn, + std::forward(data), + [fn = std::forward(fn), shared = shared_](boost::system::error_code ec) mutable { + // Workaround: asio channels bug returns ec=0 on cancel, check isClosed() instead + if (not ec and shared->isClosed()) { + fn(false); + return; + } + + fn(not ec); + } + ); + } + + /** + * @brief Attempts to send data through the channel without blocking. + * + * @tparam D The type of data to send (must be convertible to T) + * @param data The data to send + * @return true if the data was sent successfully, false if the channel is full or closed + */ + template + bool + trySend(D&& data) + requires(std::convertible_to, std::remove_cvref_t>) + { + boost::system::error_code ec; + return shared_->channel().try_send(ec, std::forward(data)); + } + }; + + /** + * @brief The receiving end of a channel. + * + * Receiver is copyable and movable. Multiple receivers can consume from the same channel concurrently. + * When all Receiver instances are destroyed, the channel is closed and senders will fail to send. + */ + class Receiver { + std::shared_ptr shared_; + std::shared_ptr guard_; + + public: + /** + * @brief Constructs a Receiver from a shared control block. 
+ * @param shared The shared control block managing the channel state + */ + Receiver(std::shared_ptr shared) + : shared_(std::move(shared)), guard_(std::make_shared(shared_)) {}; + + Receiver(Receiver&&) = default; + Receiver(Receiver const&) = default; + Receiver& + operator=(Receiver&&) = default; + Receiver& + operator=(Receiver const&) = default; + + /** + * @brief Attempts to receive data from the channel without blocking. + * + * @return std::optional containing the received value, or std::nullopt if the channel is empty or closed + */ + std::optional + tryReceive() + { + std::optional result; + shared_->channel().try_receive([&result](boost::system::error_code ec, auto&& value) { + if (not ec) + result = std::forward(value); + }); + + return result; + } + + /** + * @brief Asynchronously receives data from the channel using a coroutine. + * + * Blocks the coroutine until data is available or the channel is closed. + * + * @param yield The Boost.Asio yield context for coroutine suspension + * @return std::optional containing the received value, or std::nullopt if the channel is closed + */ + [[nodiscard]] std::optional + asyncReceive(boost::asio::yield_context yield) + { + boost::system::error_code ec; + auto value = shared_->channel().async_receive(yield[ec]); + + if (ec) + return std::nullopt; + + return value; + } + + /** + * @brief Asynchronously receives data from the channel using a callback. + * + * The callback is invoked when data is available or the channel is closed. + * + * @param fn Callback function invoked with std::optional containing the value, or std::nullopt if closed + */ + void + asyncReceive(std::invocable>> auto&& fn) + { + shared_->channel().async_receive( + [fn = std::forward(fn)](boost::system::error_code ec, T&& value) mutable { + if (ec) { + fn(std::optional(std::nullopt)); + return; + } + + fn(std::make_optional(std::move(value))); + } + ); + } + + /** + * @brief Checks if the channel is closed. 
+ * + * A channel is closed when all Sender instances have been destroyed. + * + * @return true if the channel is closed, false otherwise + */ + [[nodiscard]] bool + isClosed() const + { + return shared_->isClosed(); + } + }; + +public: + /** + * @brief Factory function to create channel components. + * @param context A supported context type (either io_context or thread_pool) + * @param capacity Size of the internal buffer on the channel + * @return A pair of Sender and Receiver + */ + static std::pair + create(auto&& context, std::size_t capacity) + { +#ifdef __clang__ + static_assert( + util::detail::ChannelInstantiated::value, + "When using Channel with Clang, you must add INSTANTIATE_CHANNEL_FOR_CLANG(T) " + "to one .cpp file. See documentation at the bottom of Channel.hpp for details." + ); +#endif + auto shared = std::make_shared(std::forward(context), capacity); + auto sender = Sender{shared}; + auto receiver = Receiver{std::move(shared)}; + + return {std::move(sender), std::move(receiver)}; + } +}; + +} // namespace util + +// ================================================================================================ +// Clang/Apple Clang Workaround for Boost.Asio Experimental Channels +// ================================================================================================ +// +// IMPORTANT: When using Channel with Clang or Apple Clang, you MUST add the following line +// to ONE .cpp file that uses Channel: +// +// INSTANTIATE_CHANNEL_FOR_CLANG(YourType) +// +// Example: +// // In ChannelTests.cpp or any .cpp file that uses Channel: +// #include "util/Channel.hpp" +// INSTANTIATE_CHANNEL_FOR_CLANG(int) +// +// Why this is needed: +// Boost.Asio's experimental concurrent_channel has a bug where close() doesn't properly cancel +// pending async operations. 
When using cancellation signals (which we do in our workaround), +// Clang generates vtable references for internal cancellation_handler types but Boost.Asio +// doesn't provide the definitions, causing linker errors: +// +// Undefined symbols for architecture arm64: +// "boost::asio::detail::cancellation_handler<...>::call(boost::asio::cancellation_type)" +// "boost::asio::detail::cancellation_handler<...>::destroy()" +// +// This macro explicitly instantiates the required template specializations. +// +// See: https://github.com/chriskohlhoff/asio/issues/1575 +// +#ifdef __clang__ + +#include +#include +#include + +namespace util::detail { +// Tag type used to verify that INSTANTIATE_CHANNEL_FOR_CLANG was called for a given type +template +struct ChannelInstantiated : std::false_type {}; +} // namespace util::detail + +#define INSTANTIATE_CHANNEL_FOR_CLANG(T) \ + /* NOLINTNEXTLINE(cppcoreguidelines-virtual-class-destructor) */ \ + template class boost::asio::detail::cancellation_handler< \ + boost::asio::experimental::detail::channel_service:: \ + op_cancellation, void(boost::system::error_code, T)>>; \ + namespace util::detail { \ + template <> \ + struct ChannelInstantiated : std::true_type {}; \ + } + +#else + +// No workaround needed for non-Clang compilers +#define INSTANTIATE_CHANNEL_FOR_CLANG(T) + +#endif diff --git a/src/util/build/Build.cpp b/src/util/build/Build.cpp index 095b0fbf10..35a690e0d6 100644 --- a/src/util/build/Build.cpp +++ b/src/util/build/Build.cpp @@ -26,7 +26,20 @@ namespace util::build { #ifndef CLIO_VERSION #error "CLIO_VERSION must be defined" #endif +#ifndef GIT_COMMIT_HASH +#error "GIT_COMMIT_HASH must be defined" +#endif +#ifndef GIT_BUILD_BRANCH +#error "GIT_BUILD_BRANCH must be defined" +#endif +#ifndef BUILD_DATE +#error "BUILD_DATE must be defined" +#endif + static constexpr char kVERSION_STRING[] = CLIO_VERSION; +static constexpr char kGIT_COMMIT_HASH[] = GIT_COMMIT_HASH; +static constexpr char kGIT_BUILD_BRANCH[] = 
GIT_BUILD_BRANCH; +static constexpr char kBUILD_DATE[] = BUILD_DATE; std::string const& getClioVersionString() @@ -42,4 +55,25 @@ getClioFullVersionString() return value; } +std::string const& +getGitCommitHash() +{ + static std::string const value = kGIT_COMMIT_HASH; // NOLINT(readability-identifier-naming) + return value; +} + +std::string const& +getGitBuildBranch() +{ + static std::string const value = kGIT_BUILD_BRANCH; // NOLINT(readability-identifier-naming) + return value; +} + +std::string const& +getBuildDate() +{ + static std::string const value = kBUILD_DATE; // NOLINT(readability-identifier-naming) + return value; +} + } // namespace util::build diff --git a/src/util/build/Build.hpp b/src/util/build/Build.hpp index 0230b95c81..1b929172c3 100644 --- a/src/util/build/Build.hpp +++ b/src/util/build/Build.hpp @@ -29,4 +29,13 @@ getClioVersionString(); std::string const& getClioFullVersionString(); +std::string const& +getGitCommitHash(); + +std::string const& +getGitBuildBranch(); + +std::string const& +getBuildDate(); + } // namespace util::build diff --git a/src/util/build/CMakeLists.txt b/src/util/build/CMakeLists.txt index cf7a5b0d00..740ced03e1 100644 --- a/src/util/build/CMakeLists.txt +++ b/src/util/build/CMakeLists.txt @@ -3,4 +3,7 @@ include(${CMAKE_CURRENT_SOURCE_DIR}/../../../cmake/ClioVersion.cmake) add_library(clio_build_version) target_sources(clio_build_version PRIVATE Build.cpp) target_link_libraries(clio_build_version PUBLIC clio_options) -target_compile_definitions(clio_build_version PRIVATE CLIO_VERSION="${CLIO_VERSION}") +target_compile_definitions( + clio_build_version PRIVATE CLIO_VERSION="${CLIO_VERSION}" GIT_COMMIT_HASH="${GIT_COMMIT_HASH}" + GIT_BUILD_BRANCH="${GIT_BUILD_BRANCH}" BUILD_DATE="${BUILD_DATE}" +) diff --git a/src/util/config/ConfigDefinition.cpp b/src/util/config/ConfigDefinition.cpp index 0ed6bf7250..430a8b34a7 100644 --- a/src/util/config/ConfigDefinition.cpp +++ b/src/util/config/ConfigDefinition.cpp @@ -361,6 
+361,7 @@ getClioConfig() {"cache.load", ConfigValue{ConfigType::String}.defaultValue("async").withConstraint(gValidateLoadMode)}, {"cache.file.path", ConfigValue{ConfigType::String}.optional()}, {"cache.file.max_sequence_age", ConfigValue{ConfigType::Integer}.defaultValue(5000)}, + {"cache.file.async_save", ConfigValue{ConfigType::Boolean}.defaultValue(false)}, {"log.channels.[].channel", Array{ConfigValue{ConfigType::String}.optional().withConstraint(gValidateChannelName)}}, diff --git a/src/util/config/ConfigDescription.hpp b/src/util/config/ConfigDescription.hpp index 9a9ef61950..8299398c45 100644 --- a/src/util/config/ConfigDescription.hpp +++ b/src/util/config/ConfigDescription.hpp @@ -282,6 +282,9 @@ This document provides a list of all available Clio configuration properties in KV{.key = "cache.file.max_sequence_age", .value = "Max allowed difference between the latest sequence in DB and in cache file. If the cache file is " "too old (contains too low latest sequence) Clio will reject using it."}, + KV{.key = "cache.file.async_save", + .value = "When false, Clio waits for cache saving to finish before shutting down. 
When true, " + "cache saving runs in parallel with other shutdown operations."}, KV{.key = "log.channels.[].channel", .value = "The name of the log channel."}, KV{.key = "log.channels.[].level", .value = "The log level for the specific log channel."}, KV{.key = "log.level", diff --git a/src/util/log/Logger.cpp b/src/util/log/Logger.cpp index 5a3c2f771c..28b6cab143 100644 --- a/src/util/log/Logger.cpp +++ b/src/util/log/Logger.cpp @@ -271,6 +271,12 @@ LogServiceState::initialized() return initialized_; } +bool +LogServiceState::hasSinks() +{ + return not sinks_.empty(); +} + void LogServiceState::reset() { diff --git a/src/util/log/Logger.hpp b/src/util/log/Logger.hpp index 370625890a..9e7b61eb49 100644 --- a/src/util/log/Logger.hpp +++ b/src/util/log/Logger.hpp @@ -267,6 +267,14 @@ class LogServiceState { [[nodiscard]] static bool initialized(); + /** + * @brief Whether the LogService has any sink. If there is no sink, logger will not log messages anywhere. + * + * @return true if the LogService has at least one sink + */ + [[nodiscard]] static bool + hasSinks(); + /** * @brief Reset the logging service to uninitialized state. 
*/ diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt index fe15f63305..ba3b08f2f0 100644 --- a/tests/unit/CMakeLists.txt +++ b/tests/unit/CMakeLists.txt @@ -167,6 +167,7 @@ target_sources( util/AccountUtilsTests.cpp util/AssertTests.cpp util/BytesConverterTests.cpp + util/ChannelTests.cpp util/CoroutineTest.cpp util/MoveTrackerTests.cpp util/ObservableValueTest.cpp diff --git a/tests/unit/data/AmendmentCenterTests.cpp b/tests/unit/data/AmendmentCenterTests.cpp index 18bb2e6bb3..c5d2b1d489 100644 --- a/tests/unit/data/AmendmentCenterTests.cpp +++ b/tests/unit/data/AmendmentCenterTests.cpp @@ -104,16 +104,13 @@ TEST_F(AmendmentCenterTest, IsMultipleEnabled) }); } -TEST_F(AmendmentCenterTest, IsEnabledThrowsWhenUnavailable) +TEST_F(AmendmentCenterTest, IsEnabledReturnsFalseWhenAmendmentsLedgerObjectUnavailable) { EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::amendments().key, kSEQ, testing::_)) .WillOnce(testing::Return(std::nullopt)); runSpawn([this](auto yield) { - EXPECT_THROW( - { [[maybe_unused]] auto const result = amendmentCenter.isEnabled(yield, "irrelevant", kSEQ); }, - std::runtime_error - ); + EXPECT_NO_THROW(EXPECT_FALSE(amendmentCenter.isEnabled(yield, "irrelevant", kSEQ))); }); } @@ -126,6 +123,21 @@ TEST_F(AmendmentCenterTest, IsEnabledReturnsFalseWhenNoAmendments) runSpawn([this](auto yield) { EXPECT_FALSE(amendmentCenter.isEnabled(yield, "irrelevant", kSEQ)); }); } +TEST_F(AmendmentCenterTest, IsEnabledReturnsVectorOfFalseWhenAmendmentsLedgerObjectUnavailable) +{ + EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::amendments().key, kSEQ, testing::_)) + .WillOnce(testing::Return(std::nullopt)); + + runSpawn([this](auto yield) { + std::vector const keys{"fixUniversalNumber", "ImmediateOfferKilled"}; + std::vector vec; + EXPECT_NO_THROW(vec = amendmentCenter.isEnabled(yield, keys, kSEQ)); + + EXPECT_EQ(vec.size(), keys.size()); + EXPECT_TRUE(std::ranges::all_of(vec, std::logical_not<>{})); + }); +} + 
TEST_F(AmendmentCenterTest, IsEnabledReturnsVectorOfFalseWhenNoAmendments) { auto const amendments = createBrokenAmendmentsObject(); diff --git a/tests/unit/data/LedgerCacheSaverTests.cpp b/tests/unit/data/LedgerCacheSaverTests.cpp index 782792ebeb..afb6c30e0b 100644 --- a/tests/unit/data/LedgerCacheSaverTests.cpp +++ b/tests/unit/data/LedgerCacheSaverTests.cpp @@ -47,17 +47,23 @@ struct LedgerCacheSaverTest : virtual testing::Test { constexpr static auto kFILE_PATH = "./cache.bin"; static ClioConfigDefinition - generateConfig(bool cacheFilePathHasValue) + generateConfig(bool cacheFilePathHasValue, bool asyncSave) { auto config = ClioConfigDefinition{{ {"cache.file.path", ConfigValue{ConfigType::String}.optional()}, + {"cache.file.async_save", ConfigValue{ConfigType::Boolean}.defaultValue(false)}, }}; ConfigFileJson jsonFile{boost::json::object{}}; if (cacheFilePathHasValue) { - auto const jsonObject = - boost::json::parse(fmt::format(R"JSON({{"cache": {{"file": {{"path": "{}"}}}}}})JSON", kFILE_PATH)) - .as_object(); + auto const jsonObject = boost::json::parse( + fmt::format( + R"JSON({{"cache": {{"file": {{"path": "{}", "async_save": {} }} }} }})JSON", + kFILE_PATH, + asyncSave + ) + ) + .as_object(); jsonFile = ConfigFileJson{jsonObject}; } auto const errors = config.parse(jsonFile); @@ -68,7 +74,7 @@ struct LedgerCacheSaverTest : virtual testing::Test { TEST_F(LedgerCacheSaverTest, SaveSuccessfully) { - auto const config = generateConfig(true); + auto const config = generateConfig(/* cacheFilePathHasValue = */ true, /* asyncSave = */ true); LedgerCacheSaver saver{config, cache}; EXPECT_CALL(cache, saveToFile(kFILE_PATH)).WillOnce(testing::Return(std::expected{})); @@ -79,7 +85,7 @@ TEST_F(LedgerCacheSaverTest, SaveSuccessfully) TEST_F(LedgerCacheSaverTest, SaveWithError) { - auto const config = generateConfig(true); + auto const config = generateConfig(/* cacheFilePathHasValue = */ true, /* asyncSave = */ true); LedgerCacheSaver saver{config, cache}; 
EXPECT_CALL(cache, saveToFile(kFILE_PATH)) @@ -91,7 +97,7 @@ TEST_F(LedgerCacheSaverTest, SaveWithError) TEST_F(LedgerCacheSaverTest, NoSaveWhenPathNotConfigured) { - auto const config = generateConfig(false); + auto const config = generateConfig(/* cacheFilePathHasValue = */ false, /* asyncSave = */ true); LedgerCacheSaver saver{config, cache}; saver.save(); @@ -100,7 +106,7 @@ TEST_F(LedgerCacheSaverTest, NoSaveWhenPathNotConfigured) TEST_F(LedgerCacheSaverTest, DestructorWaitsForCompletion) { - auto const config = generateConfig(true); + auto const config = generateConfig(/* cacheFilePathHasValue = */ true, /* asyncSave = */ true); std::binary_semaphore semaphore{1}; std::atomic_bool saveCompleted{false}; @@ -123,7 +129,7 @@ TEST_F(LedgerCacheSaverTest, DestructorWaitsForCompletion) TEST_F(LedgerCacheSaverTest, WaitToFinishCanBeCalledMultipleTimes) { - auto const config = generateConfig(true); + auto const config = generateConfig(/* cacheFilePathHasValue = */ true, /* asyncSave = */ true); LedgerCacheSaver saver{config, cache}; EXPECT_CALL(cache, saveToFile(kFILE_PATH)); @@ -135,7 +141,7 @@ TEST_F(LedgerCacheSaverTest, WaitToFinishCanBeCalledMultipleTimes) TEST_F(LedgerCacheSaverTest, WaitToFinishWithoutSaveIsSafe) { - auto const config = generateConfig(true); + auto const config = generateConfig(/* cacheFilePathHasValue = */ true, /* asyncSave = */ true); LedgerCacheSaver saver{config, cache}; EXPECT_NO_THROW(saver.waitToFinish()); } @@ -144,13 +150,61 @@ struct LedgerCacheSaverAssertTest : LedgerCacheSaverTest, common::util::WithMock TEST_F(LedgerCacheSaverAssertTest, MultipleSavesNotAllowed) { - auto const config = generateConfig(true); + auto const config = generateConfig(/* cacheFilePathHasValue = */ true, /* asyncSave = */ true); LedgerCacheSaver saver{config, cache}; + std::binary_semaphore semaphore{0}; - EXPECT_CALL(cache, saveToFile(kFILE_PATH)); + EXPECT_CALL(cache, saveToFile(kFILE_PATH)).WillOnce([&](auto&&) { + semaphore.acquire(); + return 
std::expected{}; + }); saver.save(); EXPECT_CLIO_ASSERT_FAIL({ saver.save(); }); + semaphore.release(); + + saver.waitToFinish(); +} + +TEST_F(LedgerCacheSaverTest, SyncSaveWaitsForCompletion) +{ + auto const config = generateConfig(/* cacheFilePathHasValue = */ true, /* asyncSave = */ false); + + std::atomic_bool saveCompleted{false}; + + EXPECT_CALL(cache, saveToFile(kFILE_PATH)).WillOnce([&]() { + std::this_thread::sleep_for(std::chrono::milliseconds(50)); + saveCompleted = true; + return std::expected{}; + }); + + LedgerCacheSaver saver{config, cache}; + saver.save(); + EXPECT_TRUE(saveCompleted); +} + +TEST_F(LedgerCacheSaverTest, AsyncSaveDoesNotWaitForCompletion) +{ + auto const config = generateConfig(/* cacheFilePathHasValue = */ true, /* asyncSave = */ true); + std::binary_semaphore saveStarted{0}; + std::binary_semaphore continueExecution{0}; + std::atomic_bool saveCompleted{false}; + + EXPECT_CALL(cache, saveToFile(kFILE_PATH)).WillOnce([&]() { + saveStarted.release(); + continueExecution.acquire(); + saveCompleted = true; + return std::expected{}; + }); + + LedgerCacheSaver saver{config, cache}; + saver.save(); + + EXPECT_TRUE(saveStarted.try_acquire_for(std::chrono::seconds{5})); + EXPECT_FALSE(saveCompleted); + + continueExecution.release(); saver.waitToFinish(); + EXPECT_TRUE(saveCompleted); } diff --git a/tests/unit/rpc/WorkQueueTests.cpp b/tests/unit/rpc/WorkQueueTests.cpp index d32422e6cc..c716c808d0 100644 --- a/tests/unit/rpc/WorkQueueTests.cpp +++ b/tests/unit/rpc/WorkQueueTests.cpp @@ -18,6 +18,7 @@ //============================================================================== #include "rpc/WorkQueue.hpp" +#include "util/MockAssert.hpp" #include "util/MockPrometheus.hpp" #include "util/config/ConfigDefinition.hpp" #include "util/config/ConfigValue.hpp" @@ -29,10 +30,12 @@ #include #include +#include #include #include #include #include +#include #include using namespace util; @@ -111,7 +114,32 @@ TEST_F(WorkQueueTest, 
NonWhitelistedPreventSchedulingAtQueueLimitExceeded) EXPECT_TRUE(unblocked); } -struct WorkQueuePriorityTest : WithPrometheus, virtual ::testing::Test { +struct WorkQueueDelayedStartTest : WithPrometheus { + WorkQueue queue{WorkQueue::kDONT_START_PROCESSING_TAG, /* numWorkers = */ 1, /* maxSize = */ 100}; +}; + +TEST_F(WorkQueueDelayedStartTest, WaitTimeIncludesDelayBeforeStartProcessing) +{ + std::atomic_bool taskExecuted = false; + + ASSERT_TRUE(queue.postCoro( + [&taskExecuted](auto /* yield */) { taskExecuted = true; }, + /* isWhiteListed = */ true + )); + + std::this_thread::sleep_for(std::chrono::milliseconds(50)); + queue.startProcessing(); + queue.stop(); + + EXPECT_TRUE(taskExecuted); + + auto const report = queue.report(); + auto const durationUs = report.at("queued_duration_us").as_uint64(); + + EXPECT_GE(durationUs, 50000u) << "Wait time should include the delay before startProcessing"; +} + +struct WorkQueuePriorityTest : WithPrometheus { WorkQueue queue{WorkQueue::kDONT_START_PROCESSING_TAG, /* numWorkers = */ 1, /* maxSize = */ 100}; }; @@ -207,11 +235,7 @@ TEST_F(WorkQueueStopTest, CallsOnTasksCompleteWhenStoppingOnLastTask) queue.stop(); } -struct WorkQueueMockPrometheusTest : WithMockPrometheus, RPCWorkQueueTestBase { - WorkQueueMockPrometheusTest() : RPCWorkQueueTestBase(/* workers = */ 1, /*maxQueueSize = */ 2) - { - } -}; +struct WorkQueueMockPrometheusTest : WithMockPrometheus {}; TEST_F(WorkQueueMockPrometheusTest, postCoroCounters) { @@ -221,17 +245,40 @@ TEST_F(WorkQueueMockPrometheusTest, postCoroCounters) std::binary_semaphore semaphore{0}; - EXPECT_CALL(curSizeMock, value()).WillOnce(::testing::Return(0)).WillRepeatedly(::testing::Return(1)); + EXPECT_CALL(curSizeMock, value()) + .WillOnce(::testing::Return(0)) // in startProcessing + .WillOnce(::testing::Return(0)); // first check in postCoro EXPECT_CALL(curSizeMock, add(1)); EXPECT_CALL(queuedMock, add(1)); EXPECT_CALL(durationMock, add(::testing::Ge(0))).WillOnce([&](auto) { 
EXPECT_CALL(curSizeMock, add(-1)); - EXPECT_CALL(curSizeMock, value()).WillOnce(::testing::Return(0)); semaphore.release(); }); + // Note: the queue is not in the fixture because above expectations must be setup before startProcessing runs + WorkQueue queue(/* numWorkers = */ 4, /* maxSize = */ 2); auto const res = queue.postCoro([&](auto /* yield */) { semaphore.acquire(); }, /* isWhiteListed = */ false); ASSERT_TRUE(res); queue.stop(); } + +// Note: not using EXPECT_CLIO_ASSERT_FAIL because exception is swallowed by the WQ context +// TODO [https://github.com/XRPLF/clio/issues/2906]: Enable the test once we figure out a better way to do it without +// using up >2 minutes of CI time +struct WorkQueueDeathTest : WorkQueueMockPrometheusTest, common::util::WithMockAssert {}; +TEST_F(WorkQueueDeathTest, DISABLED_ExecuteTaskAssertsWhenQueueIsEmpty) +{ + [[maybe_unused]] auto& queuedMock = makeMock("work_queue_queued_total_number", ""); + [[maybe_unused]] auto& durationMock = makeMock("work_queue_cumulative_tasks_duration_us", ""); + auto& curSizeMock = makeMock("work_queue_current_size", ""); + + EXPECT_CALL(curSizeMock, value()).WillRepeatedly(::testing::Return(1)); // lie about the size + EXPECT_DEATH( + { + WorkQueue queue(WorkQueue::kDONT_START_PROCESSING_TAG, /* numWorkers = */ 1, /* maxSize = */ 2); + queue.startProcessing(); // the actual queue is empty which will lead to assertion failure + }, + ".*" + ); +} diff --git a/tests/unit/util/ChannelTests.cpp b/tests/unit/util/ChannelTests.cpp new file mode 100644 index 0000000000..13809c4530 --- /dev/null +++ b/tests/unit/util/ChannelTests.cpp @@ -0,0 +1,756 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2025, the clio developers. 
+ + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include "util/Assert.hpp" +#include "util/Channel.hpp" +#include "util/Mutex.hpp" +#include "util/OverloadSet.hpp" +#include "util/Spawn.hpp" + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace testing; + +namespace { + +constexpr auto kDEFAULT_THREAD_POOL_SIZE = 4; +constexpr auto kTEST_TIMEOUT = std::chrono::seconds{10}; + +constexpr auto kNUM_SENDERS = 3uz; +constexpr auto kNUM_RECEIVERS = 3uz; +constexpr auto kVALUES_PER_SENDER = 500uz; +constexpr auto kTOTAL_EXPECTED = kNUM_SENDERS * kVALUES_PER_SENDER; + +enum class ContextType { IOContext, ThreadPool }; + +constexpr int +generateValue(std::size_t senderId, std::size_t i) +{ + return static_cast((senderId * 100) + i); +} + +std::vector +generateExpectedValues() +{ + std::vector expectedValues; + expectedValues.reserve(kTOTAL_EXPECTED); + for (auto senderId = 0uz; senderId < kNUM_SENDERS; ++senderId) { + for (auto i = 0uz; i < kVALUES_PER_SENDER; ++i) { + expectedValues.push_back(generateValue(senderId, i)); + } + } + std::ranges::sort(expectedValues); + return 
expectedValues; +} + +std::vector const kEXPECTED_VALUES = generateExpectedValues(); + +std::string +contextTypeToString(ContextType type) +{ + return type == ContextType::IOContext ? "IOContext" : "ThreadPool"; +} + +class ContextWrapper { +public: + using ContextVariant = std::variant; + + explicit ContextWrapper(ContextType type) + : context_([type] { + if (type == ContextType::IOContext) + return ContextVariant(std::in_place_type_t()); + + if (type == ContextType::ThreadPool) + return ContextVariant(std::in_place_type_t(), kDEFAULT_THREAD_POOL_SIZE); + + ASSERT(false, "Unknown new type of context"); + std::unreachable(); + }()) + { + } + + template + void + withExecutor(Fn&& fn) + { + std::visit(std::forward(fn), context_); + } + + void + run() + { + std::visit( + util::OverloadSet{ + [](boost::asio::io_context& context) { context.run_for(kTEST_TIMEOUT); }, + [](boost::asio::thread_pool& context) { context.join(); }, + }, + context_ + ); + } + +private: + ContextVariant context_; +}; + +} // namespace + +class ChannelSpawnTest : public TestWithParam { +protected: + ChannelSpawnTest() : context_(GetParam()) + { + } + + ContextWrapper context_; +}; + +class ChannelCallbackTest : public TestWithParam { +protected: + ChannelCallbackTest() : context_(GetParam()) + { + } + + ContextWrapper context_; +}; + +TEST_P(ChannelSpawnTest, MultipleSendersOneReceiver) +{ + context_.withExecutor([this](auto& executor) { + auto [sender, receiver] = util::Channel::create(executor, 10); + util::Mutex> receivedValues; + + util::spawn(executor, [&receiver, &receivedValues](boost::asio::yield_context yield) mutable { + while (true) { + auto value = receiver.asyncReceive(yield); + if (not value.has_value()) + break; + receivedValues.lock()->push_back(*value); + } + }); + + { + auto localSender = std::move(sender); + for (auto senderId = 0uz; senderId < kNUM_SENDERS; ++senderId) { + util::spawn(executor, [senderCopy = localSender, senderId](boost::asio::yield_context yield) mutable { + 
for (auto i = 0uz; i < kVALUES_PER_SENDER; ++i) { + if (not senderCopy.asyncSend(generateValue(senderId, i), yield)) + break; + } + }); + } + } + + context_.run(); + + EXPECT_EQ(receivedValues.lock()->size(), kTOTAL_EXPECTED); + std::ranges::sort(receivedValues.lock().get()); + + EXPECT_EQ(receivedValues.lock().get(), kEXPECTED_VALUES); + }); +} + +TEST_P(ChannelSpawnTest, MultipleSendersMultipleReceivers) +{ + context_.withExecutor([this](auto& executor) { + auto [sender, receiver] = util::Channel::create(executor, 10); + util::Mutex> receivedValues; + std::vector receivers(kNUM_RECEIVERS, receiver); + + for (auto receiverId = 0uz; receiverId < kNUM_RECEIVERS; ++receiverId) { + util::spawn( + executor, + [&receiverRef = receivers[receiverId], &receivedValues](boost::asio::yield_context yield) mutable { + while (true) { + auto value = receiverRef.asyncReceive(yield); + if (not value.has_value()) + break; + receivedValues.lock()->push_back(*value); + } + } + ); + } + + { + auto localSender = std::move(sender); + for (auto senderId = 0uz; senderId < kNUM_SENDERS; ++senderId) { + util::spawn(executor, [senderCopy = localSender, senderId](boost::asio::yield_context yield) mutable { + for (auto i = 0uz; i < kVALUES_PER_SENDER; ++i) { + auto const value = generateValue(senderId, i); + if (not senderCopy.asyncSend(value, yield)) + break; + } + }); + } + } + + context_.run(); + + EXPECT_EQ(receivedValues.lock()->size(), kTOTAL_EXPECTED); + std::ranges::sort(receivedValues.lock().get()); + + EXPECT_EQ(receivedValues.lock().get(), kEXPECTED_VALUES); + }); +} + +TEST_P(ChannelSpawnTest, ChannelClosureScenarios) +{ + context_.withExecutor([this](auto& executor) { + std::atomic_bool testCompleted{false}; + + util::spawn(executor, [&executor, &testCompleted](boost::asio::yield_context yield) mutable { + auto [sender, receiver] = util::Channel::create(executor, 5); + + EXPECT_FALSE(receiver.isClosed()); + + bool const success = sender.asyncSend(42, yield); + EXPECT_TRUE(success); 
+ + auto value = receiver.asyncReceive(yield); + EXPECT_TRUE(value.has_value()); + EXPECT_EQ(*value, 42); + + { + [[maybe_unused]] auto tempSender = std::move(sender); + } + + EXPECT_TRUE(receiver.isClosed()); + + auto closedValue = receiver.asyncReceive(yield); + EXPECT_FALSE(closedValue.has_value()); + + testCompleted = true; + }); + + context_.run(); + EXPECT_TRUE(testCompleted); + }); +} + +TEST_P(ChannelSpawnTest, TrySendTryReceiveMethods) +{ + context_.withExecutor([this](auto& executor) { + std::atomic_bool testCompleted{false}; + + util::spawn(executor, [&executor, &testCompleted](boost::asio::yield_context) mutable { + auto [sender, receiver] = util::Channel::create(executor, 3); + + EXPECT_FALSE(receiver.tryReceive().has_value()); + + EXPECT_TRUE(sender.trySend(42)); + EXPECT_TRUE(sender.trySend(43)); + EXPECT_TRUE(sender.trySend(44)); + EXPECT_FALSE(sender.trySend(45)); // channel full + + auto value1 = receiver.tryReceive(); + EXPECT_TRUE(value1.has_value()); + EXPECT_EQ(*value1, 42); + + auto value2 = receiver.tryReceive(); + EXPECT_TRUE(value2.has_value()); + EXPECT_EQ(*value2, 43); + + EXPECT_TRUE(sender.trySend(46)); + + auto value3 = receiver.tryReceive(); + EXPECT_TRUE(value3.has_value()); + EXPECT_EQ(*value3, 44); + + auto value4 = receiver.tryReceive(); + EXPECT_TRUE(value4.has_value()); + EXPECT_EQ(*value4, 46); + + EXPECT_FALSE(receiver.tryReceive().has_value()); + + testCompleted = true; + }); + + context_.run(); + EXPECT_TRUE(testCompleted); + }); +} + +TEST_P(ChannelSpawnTest, TryMethodsWithClosedChannel) +{ + context_.withExecutor([this](auto& executor) { + std::atomic_bool testCompleted{false}; + + util::spawn(executor, [&executor, &testCompleted](boost::asio::yield_context) mutable { + auto [sender, receiver] = util::Channel::create(executor, 3); + + EXPECT_TRUE(sender.trySend(42)); + EXPECT_TRUE(sender.trySend(43)); + + { + [[maybe_unused]] auto tempSender = std::move(sender); + } + + EXPECT_TRUE(receiver.isClosed()); + + auto value1 = 
receiver.tryReceive(); + EXPECT_TRUE(value1.has_value()); + EXPECT_EQ(*value1, 42); + + auto value2 = receiver.tryReceive(); + EXPECT_TRUE(value2.has_value()); + EXPECT_EQ(*value2, 43); + + EXPECT_FALSE(receiver.tryReceive().has_value()); + + testCompleted = true; + }); + + context_.run(); + EXPECT_TRUE(testCompleted); + }); +} + +INSTANTIATE_TEST_SUITE_P( + SpawnTests, + ChannelSpawnTest, + Values(ContextType::IOContext, ContextType::ThreadPool), + [](TestParamInfo const& info) { return contextTypeToString(info.param); } +); + +TEST_P(ChannelCallbackTest, MultipleSendersOneReceiver) +{ + context_.withExecutor([this](auto& executor) { + auto [sender, receiver] = util::Channel::create(executor, 10); + util::Mutex> receivedValues; + + auto receiveNext = [&receiver, &receivedValues](this auto&& self) -> void { + if (receivedValues.lock()->size() >= kTOTAL_EXPECTED) + return; + + receiver.asyncReceive([&receivedValues, self = std::forward(self)](auto value) { + if (value.has_value()) { + receivedValues.lock()->push_back(*value); + self(); + } + }); + }; + + boost::asio::post(executor, receiveNext); + + { + auto localSender = std::move(sender); + for (auto senderId = 0uz; senderId < kNUM_SENDERS; ++senderId) { + auto senderCopy = localSender; + boost::asio::post(executor, [senderCopy = std::move(senderCopy), senderId, &executor]() mutable { + auto sendNext = [senderCopy = std::move(senderCopy), + senderId, + &executor](this auto&& self, std::size_t i) -> void { + if (i >= kVALUES_PER_SENDER) + return; + + senderCopy.asyncSend( + generateValue(senderId, i), + [self = std::forward(self), &executor, i](bool success) mutable { + if (success) + boost::asio::post(executor, [self = std::move(self), i]() mutable { self(i + 1); }); + } + ); + }; + sendNext(0); + }); + } + } + + context_.run(); + + EXPECT_EQ(receivedValues.lock()->size(), kTOTAL_EXPECTED); + std::ranges::sort(receivedValues.lock().get()); + + EXPECT_EQ(receivedValues.lock().get(), kEXPECTED_VALUES); + }); +} + 
+TEST_P(ChannelCallbackTest, MultipleSendersMultipleReceivers) +{ + context_.withExecutor([this](auto& executor) { + auto [sender, receiver] = util::Channel::create(executor, 10); + util::Mutex> receivedValues; + std::vector receivers(kNUM_RECEIVERS, receiver); + + for (auto receiverId = 0uz; receiverId < kNUM_RECEIVERS; ++receiverId) { + auto& receiverRef = receivers[receiverId]; + auto receiveNext = [&receiverRef, &receivedValues](this auto&& self) -> void { + receiverRef.asyncReceive([&receivedValues, self = std::forward(self)](auto value) { + if (value.has_value()) { + receivedValues.lock()->push_back(*value); + self(); + } + }); + }; + boost::asio::post(executor, receiveNext); + } + + { + auto localSender = std::move(sender); + for (auto senderId = 0uz; senderId < kNUM_SENDERS; ++senderId) { + auto senderCopy = localSender; + boost::asio::post(executor, [senderCopy = std::move(senderCopy), senderId, &executor]() mutable { + auto sendNext = [senderCopy = std::move(senderCopy), + senderId, + &executor](this auto&& self, std::size_t i) -> void { + if (i >= kVALUES_PER_SENDER) + return; + + senderCopy.asyncSend( + generateValue(senderId, i), + [self = std::forward(self), &executor, i](bool success) mutable { + if (success) + boost::asio::post(executor, [self = std::move(self), i]() mutable { self(i + 1); }); + } + ); + }; + sendNext(0); + }); + } + } + + context_.run(); + + EXPECT_EQ(receivedValues.lock()->size(), kTOTAL_EXPECTED); + std::ranges::sort(receivedValues.lock().get()); + + EXPECT_EQ(receivedValues.lock().get(), kEXPECTED_VALUES); + }); +} + +TEST_P(ChannelCallbackTest, ChannelClosureScenarios) +{ + context_.withExecutor([this](auto& executor) { + std::atomic_bool testCompleted{false}; + auto [sender, receiver] = util::Channel::create(executor, 5); + auto receiverPtr = std::make_shared(std::move(receiver)); + auto senderPtr = std::make_shared>(std::move(sender)); + + EXPECT_FALSE(receiverPtr->isClosed()); + + senderPtr->value().asyncSend(42, [&executor, 
receiverPtr, senderPtr, &testCompleted](bool success) { + EXPECT_TRUE(success); + + receiverPtr->asyncReceive([&executor, receiverPtr, senderPtr, &testCompleted](auto value) { + EXPECT_TRUE(value.has_value()); + EXPECT_EQ(*value, 42); + + boost::asio::post(executor, [&executor, receiverPtr, senderPtr, &testCompleted]() { + senderPtr->reset(); + EXPECT_TRUE(receiverPtr->isClosed()); + + boost::asio::post(executor, [receiverPtr, &testCompleted]() { + receiverPtr->asyncReceive([&testCompleted](auto closedValue) { + EXPECT_FALSE(closedValue.has_value()); + testCompleted = true; + }); + }); + }); + }); + }); + + context_.run(); + EXPECT_TRUE(testCompleted); + }); +} + +TEST_P(ChannelCallbackTest, TrySendTryReceiveMethods) +{ + context_.withExecutor([this](auto& executor) { + std::atomic_bool testCompleted{false}; + auto [sender, receiver] = util::Channel::create(executor, 2); + auto receiverPtr = std::make_shared(std::move(receiver)); + auto senderPtr = std::make_shared(std::move(sender)); + + boost::asio::post(executor, [receiverPtr, senderPtr, &testCompleted]() { + EXPECT_FALSE(receiverPtr->tryReceive().has_value()); + + EXPECT_TRUE(senderPtr->trySend(100)); + EXPECT_TRUE(senderPtr->trySend(101)); + EXPECT_FALSE(senderPtr->trySend(102)); // channel full + + auto value1 = receiverPtr->tryReceive(); + EXPECT_TRUE(value1.has_value()); + EXPECT_EQ(*value1, 100); + + EXPECT_TRUE(senderPtr->trySend(103)); + + auto value2 = receiverPtr->tryReceive(); + EXPECT_TRUE(value2.has_value()); + EXPECT_EQ(*value2, 101); + + auto value3 = receiverPtr->tryReceive(); + EXPECT_TRUE(value3.has_value()); + EXPECT_EQ(*value3, 103); + + testCompleted = true; + }); + + context_.run(); + EXPECT_TRUE(testCompleted); + }); +} + +TEST_P(ChannelCallbackTest, TryMethodsWithClosedChannel) +{ + context_.withExecutor([this](auto& executor) { + std::atomic_bool testCompleted{false}; + auto [sender, receiver] = util::Channel::create(executor, 3); + auto receiverPtr = 
std::make_shared(std::move(receiver)); + auto senderPtr = std::make_shared>(std::move(sender)); + + boost::asio::post(executor, [receiverPtr, senderPtr, &testCompleted]() { + EXPECT_TRUE(senderPtr->value().trySend(100)); + EXPECT_TRUE(senderPtr->value().trySend(101)); + + senderPtr->reset(); + + EXPECT_TRUE(receiverPtr->isClosed()); + + auto value1 = receiverPtr->tryReceive(); + EXPECT_TRUE(value1.has_value()); + EXPECT_EQ(*value1, 100); + + auto value2 = receiverPtr->tryReceive(); + EXPECT_TRUE(value2.has_value()); + EXPECT_EQ(*value2, 101); + + EXPECT_FALSE(receiverPtr->tryReceive().has_value()); + + testCompleted = true; + }); + + context_.run(); + EXPECT_TRUE(testCompleted); + }); +} + +INSTANTIATE_TEST_SUITE_P( + CallbackTests, + ChannelCallbackTest, + Values(ContextType::IOContext, ContextType::ThreadPool), + [](TestParamInfo const& info) { return contextTypeToString(info.param); } +); + +TEST(ChannelTest, MultipleSenderCopiesErrorHandling) +{ + boost::asio::io_context executor; + bool testCompleted = false; + + util::spawn(executor, [&executor, &testCompleted](boost::asio::yield_context yield) mutable { + auto [sender, receiver] = util::Channel::create(executor, 5); + + bool const success = sender.asyncSend(42, yield); + EXPECT_TRUE(success); + + auto value = receiver.asyncReceive(yield); + EXPECT_TRUE(value.has_value()); + EXPECT_EQ(*value, 42); + + auto senderCopy = sender; + { + [[maybe_unused]] auto tempSender = std::move(sender); + // tempSender destroyed here, but senderCopy still exists + } + + EXPECT_FALSE(receiver.isClosed()); + + { + [[maybe_unused]] auto tempSender = std::move(senderCopy); + // now all senders are destroyed, channel should close + } + + EXPECT_TRUE(receiver.isClosed()); + + auto closedValue = receiver.asyncReceive(yield); + EXPECT_FALSE(closedValue.has_value()); + + testCompleted = true; + }); + + executor.run_for(kTEST_TIMEOUT); + EXPECT_TRUE(testCompleted); +} + +TEST(ChannelTest, ChannelClosesWhenAllSendersDestroyed) +{ + 
boost::asio::io_context executor; + auto [sender, receiver] = util::Channel::create(executor, 5); + + EXPECT_FALSE(receiver.isClosed()); + + auto senderCopy = sender; + { + [[maybe_unused]] auto temp = std::move(sender); + } + EXPECT_FALSE(receiver.isClosed()); // one sender still exists + + { + [[maybe_unused]] auto temp = std::move(senderCopy); + } + EXPECT_TRUE(receiver.isClosed()); // all senders destroyed +} + +TEST(ChannelTest, ChannelClosesWhenAllReceiversDestroyed) +{ + boost::asio::io_context executor; + auto [sender, receiver] = util::Channel::create(executor, 5); + + EXPECT_TRUE(sender.trySend(42)); + + auto receiverCopy = receiver; + { + [[maybe_unused]] auto temp = std::move(receiver); + } + EXPECT_TRUE(sender.trySend(43)); // one receiver still exists, can send + + { + [[maybe_unused]] auto temp = std::move(receiverCopy); + } + EXPECT_FALSE(sender.trySend(44)); // all receivers destroyed, channel closed +} + +TEST(ChannelTest, ChannelPreservesOrderFIFO) +{ + boost::asio::io_context executor; + bool testCompleted = false; + std::vector const valuesToSend = {42, 7, 99, 13, 5, 88, 21, 3, 67, 54}; + + util::spawn(executor, [&executor, &testCompleted, &valuesToSend](boost::asio::yield_context yield) mutable { + auto [sender, receiver] = util::Channel::create(executor, 5); + std::vector receivedValues; + + // Spawn a receiver coroutine that collects all values + util::spawn(executor, [&receiver, &receivedValues](boost::asio::yield_context yield) mutable { + auto value = receiver.asyncReceive(yield); + while (value.has_value()) { + receivedValues.push_back(*value); + value = receiver.asyncReceive(yield); + } + }); + + // Send all values + for (int const value : valuesToSend) { + EXPECT_TRUE(sender.asyncSend(value, yield)); + } + + // Close sender to signal end of data + { + [[maybe_unused]] auto temp = std::move(sender); + } + + // Give receiver time to process all values + boost::asio::steady_timer timer(executor, std::chrono::milliseconds{50}); + 
timer.async_wait(yield); + + // Verify received values match sent values in the same order + EXPECT_EQ(receivedValues, valuesToSend); + + testCompleted = true; + }); + + executor.run_for(kTEST_TIMEOUT); + EXPECT_TRUE(testCompleted); +} + +TEST(ChannelTest, AsyncReceiveWakesUpWhenSenderDestroyed) +{ + boost::asio::io_context executor; + bool testCompleted = false; + auto [sender, receiver] = util::Channel::create(executor, 5); + auto senderPtr = std::make_shared(std::move(sender)); + + util::spawn( + executor, + [&receiver, senderPtr = std::move(senderPtr), &testCompleted, &executor](boost::asio::yield_context) mutable { + // Start receiving - this will block because no data is sent + auto receiveTask = [&receiver, &testCompleted](boost::asio::yield_context yield) { + auto const value = receiver.asyncReceive(yield); + EXPECT_FALSE(value.has_value()); // Should receive nullopt when sender is destroyed + testCompleted = true; + }; + + util::spawn(executor, receiveTask); + + senderPtr.reset(); + } + ); + + executor.run_for(kTEST_TIMEOUT); + EXPECT_TRUE(testCompleted); +} + +// This test verifies the workaround for a bug in boost::asio::experimental::concurrent_channel where close() does not +// cancel pending async operations. Our Channel wrapper calls cancel() after close() to ensure pending operations are +// unblocked. 
+// See: https://github.com/chriskohlhoff/asio/issues/1575 +TEST(ChannelTest, PendingAsyncSendsAreCancelledOnClose) +{ + boost::asio::thread_pool pool{4}; + static constexpr auto kPENDING_NUM_SENDERS = 10uz; + + // Channel with capacity 0 - all sends will block waiting for a receiver + auto [sender, receiver] = util::Channel::create(pool, 0); + + std::atomic completedSends{0}; + std::counting_semaphore semaphore{kPENDING_NUM_SENDERS}; + + // Spawn multiple senders that will all block (no receiver is consuming) + for (auto i = 0uz; i < kPENDING_NUM_SENDERS; ++i) { + util::spawn( + pool, [senderCopy = sender, i, &completedSends, &semaphore](boost::asio::yield_context yield) mutable { + semaphore.release(1); + EXPECT_FALSE(senderCopy.asyncSend(static_cast(i), yield)); + ++completedSends; + } + ); + } + + semaphore.acquire(); + + // Close the channel by destroying the only receiver we have. + // Our workaround calls cancel() after close() to unblock pending operations + { + [[maybe_unused]] auto r = std::move(receiver); + } + + // All senders should complete (unblocked by our cancel() workaround) + pool.join(); + + // All sends should have completed (returned false due to closed channel) + EXPECT_EQ(completedSends, kPENDING_NUM_SENDERS); +} + +INSTANTIATE_CHANNEL_FOR_CLANG(int);