diff --git a/.devcontainer/cuda11.8-conda/devcontainer.json b/.devcontainer/cuda11.8-conda/devcontainer.json index 4f892980..1bd344fb 100644 --- a/.devcontainer/cuda11.8-conda/devcontainer.json +++ b/.devcontainer/cuda11.8-conda/devcontainer.json @@ -11,7 +11,9 @@ "runArgs": [ "--rm", "--name", - "${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-25.06-cuda11.8-conda" + "${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-25.06-cuda11.8-conda", + "--ulimit", + "nofile=1048576:1048576" ], "hostRequirements": {"gpu": "optional"}, "features": { diff --git a/.devcontainer/cuda11.8-pip/devcontainer.json b/.devcontainer/cuda11.8-pip/devcontainer.json index 938f0aac..b7bb0f9e 100644 --- a/.devcontainer/cuda11.8-pip/devcontainer.json +++ b/.devcontainer/cuda11.8-pip/devcontainer.json @@ -11,7 +11,9 @@ "runArgs": [ "--rm", "--name", - "${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-25.06-cuda11.8-pip" + "${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-25.06-cuda11.8-pip", + "--ulimit", + "nofile=1048576:1048576" ], "hostRequirements": {"gpu": "optional"}, "features": { diff --git a/.devcontainer/cuda12.8-conda/devcontainer.json b/.devcontainer/cuda12.8-conda/devcontainer.json index e5c11d18..cc85d297 100644 --- a/.devcontainer/cuda12.8-conda/devcontainer.json +++ b/.devcontainer/cuda12.8-conda/devcontainer.json @@ -11,7 +11,9 @@ "runArgs": [ "--rm", "--name", - "${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-25.06-cuda12.8-conda" + "${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-25.06-cuda12.8-conda", + "--ulimit", + "nofile=1048576:1048576" ], "hostRequirements": {"gpu": "optional"}, "features": { diff --git a/.devcontainer/cuda12.8-pip/devcontainer.json b/.devcontainer/cuda12.8-pip/devcontainer.json index 05571da6..24904202 100644 --- a/.devcontainer/cuda12.8-pip/devcontainer.json +++ b/.devcontainer/cuda12.8-pip/devcontainer.json @@ -11,7 +11,9 @@ "runArgs": [ "--rm", "--name", - "${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-25.06-cuda12.8-pip" + "${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-25.06-cuda12.8-pip", + "--ulimit", + "nofile=1048576:1048576" ], "hostRequirements": {"gpu": "optional"}, "features": { diff --git a/.devcontainer/rapids.Dockerfile b/.devcontainer/rapids.Dockerfile index 2a8435ed..aa9bed6a 100644 --- a/.devcontainer/rapids.Dockerfile +++ b/.devcontainer/rapids.Dockerfile @@ -30,6 +30,8 @@ ENV DEFAULT_CONDA_ENV=rapids FROM ${PYTHON_PACKAGE_MANAGER}-base +ARG TARGETARCH + ARG CUDA ENV CUDAARCHS="RAPIDS" ENV CUDA_VERSION="${CUDA_VERSION:-${CUDA}}" @@ -43,9 +45,21 @@ ENV PYTHONDONTWRITEBYTECODE="1" ENV SCCACHE_REGION="us-east-2" ENV SCCACHE_BUCKET="rapids-sccache-devs" -ENV SCCACHE_IDLE_TIMEOUT=900 +ENV SCCACHE_DIST_CONNECT_TIMEOUT=30 +ENV SCCACHE_DIST_REQUEST_TIMEOUT=1800 +ENV SCCACHE_DIST_URL="https://${TARGETARCH}.linux.sccache.gha-runners.nvidia.com" +ENV SCCACHE_IDLE_TIMEOUT=1800 ENV AWS_ROLE_ARN="arn:aws:iam::279114543810:role/nv-gha-token-sccache-devs" ENV HISTFILE="/home/coder/.cache/._bash_history" ENV LIBCUDF_KERNEL_CACHE_PATH="/home/coder/cudf/cpp/build/${PYTHON_PACKAGE_MANAGER}/cuda-${CUDA_VERSION}/latest/jitify_cache" + +# Prevent the sccache server from shutting down +ENV SCCACHE_IDLE_TIMEOUT=0 +ENV SCCACHE_SERVER_LOG="sccache=info" +ENV SCCACHE_S3_KEY_PREFIX=rapids-test-sccache-dist + +# Build as much in parallel as possible +ENV INFER_NUM_DEVICE_ARCHITECTURES=1 +ENV MAX_DEVICE_OBJ_TO_COMPILE_IN_PARALLEL=20 diff --git 
a/.github/actions/build-and-test-feature/action.yml b/.github/actions/build-and-test-feature/action.yml index 70d11f73..6f3e73ce 100644 --- a/.github/actions/build-and-test-feature/action.yml +++ b/.github/actions/build-and-test-feature/action.yml @@ -8,6 +8,7 @@ inputs: aws_role_arn: {type: string, defaut: '', required: false} rw_sccache_bucket: {type: string, defaut: '', required: false} rw_sccache_region: {type: string, defaut: '', required: false} + sccache_dist_scheduler_url: {type: string, defaut: '', required: false} runs: using: composite @@ -32,3 +33,4 @@ runs: aws_role_arn: "${{ inputs.aws_role_arn }}" rw_sccache_bucket: "${{ inputs.rw_sccache_bucket }}" rw_sccache_region: "${{ inputs.rw_sccache_region }}" + sccache_dist_scheduler_url: "${{ inputs.sccache_dist_scheduler_url }}" diff --git a/.github/workflows/build-all-rapids-repos.yml b/.github/workflows/build-all-rapids-repos.yml index 6ca6ee33..7743c200 100644 --- a/.github/workflows/build-all-rapids-repos.yml +++ b/.github/workflows/build-all-rapids-repos.yml @@ -37,25 +37,55 @@ jobs: with: arch: '["amd64", "arm64"]' cuda: '["12.8"]' - node_type: cpu32 + node_type: cpu8 extra-repo-deploy-key: CUMLPRIMS_SSH_PRIVATE_DEPLOY_KEY + rapids-aux-secret-1: GIST_REPO_READ_ORG_GITHUB_TOKEN timeout-minutes: 720 - # Prevent the sccache server from shutting down + # 1. Prevent the sccache server from shutting down + # 2. Infinitely retry transient errors + # 3. Never fallback to locally compiling env: | SCCACHE_IDLE_TIMEOUT=0 + SCCACHE_DIST_MAX_RETRIES=inf + SCCACHE_DIST_FALLBACK_TO_LOCAL_COMPILE=false build_command: | - SCCACHE_NO_DAEMON=1 sccache --stop-server - sccache -z; + # Install the latest sccache client + devcontainer-utils-install-sccache --repo trxcllnt/sccache; + + # Configure the sccache client to talk to the build cluster + devcontainer-utils-init-sccache-dist \ + --enable-sccache-dist - <<< " \ + --auth-type 'token' \ + --auth-token '$RAPIDS_AUX_SECRET_1' \ + "; + + # Verify sccache cache location sccache --show-adv-stats; - clone-all -j$(nproc) -v -q --clone-upstream --single-branch --shallow-submodules; - build-all \ - -Wno-dev \ - -j$(nproc --ignore=1) \ - -DBUILD_SHARED_LIBS=ON \ - -DBUILD_TESTS=ON \ - -DBUILD_BENCHMARKS=ON \ - -DBUILD_PRIMS_BENCH=ON \ - -DRAFT_COMPILE_LIBRARY=ON \ - -DBUILD_CUGRAPH_MG_TESTS=ON \ - ; + + # Clone all the repos + clone-all -j$(nproc) -v -q --clone-upstream --depth 1 --single-branch --shallow-submodules --no-update-env; + + # Create the python env without ninja. + # ninja -j10000 fails with `ninja: FATAL: pipe: Too many open files`. + # This appears to have been fixed 13 years ago (https://github.com/ninja-build/ninja/issues/233), + # so that fix needs to be integrated into the kitware pip ninja builds. 
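# (Illustrative aside, not part of the original build_command.) The
# `--ulimit nofile=1048576:1048576` runArg added to the devcontainers above
# is what makes the `-j$(ulimit -Hn)` values used later in this script viable.
# A minimal, hedged sanity check inside the container could look like:
#
#   echo "nofile soft/hard limits: $(ulimit -Sn)/$(ulimit -Hn)";
#   ulimit -n "$(ulimit -Hn)";  # raise the soft limit to the hard limit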
+ rapids-post-start-command --exclude <(echo ninja); + + set -x; + + # Configure all the C++ libs + configure-all \ + -j$(ulimit -Hn) \ + -Wno-dev \ + -DBUILD_TESTS=ON \ + -DBUILD_BENCHMARKS=ON \ + -DBUILD_PRIMS_BENCH=ON \ + -DBUILD_SHARED_LIBS=ON \ + -DRAFT_COMPILE_LIBRARY=ON \ + -DBUILD_CUGRAPH_MG_TESTS=ON ; + + # Build all the libs + build-all -j$(ulimit -Hn); + + # Print cache and dist stats sccache --show-adv-stats; diff --git a/.github/workflows/build-and-test-feature.yml b/.github/workflows/build-and-test-feature.yml index b15b78fe..a27d57d8 100644 --- a/.github/workflows/build-and-test-feature.yml +++ b/.github/workflows/build-and-test-feature.yml @@ -40,3 +40,4 @@ jobs: aws_role_arn: "${{ secrets.GIST_REPO_READ_ORG_GITHUB_TOKEN && 'arn:aws:iam::279114543810:role/nv-gha-token-sccache-devs' || '' }}" rw_sccache_bucket: "${{ secrets.GIST_REPO_READ_ORG_GITHUB_TOKEN && 'rapids-sccache-devs' || '' }}" rw_sccache_region: "${{ vars.AWS_REGION }}" + sccache_dist_scheduler_url: "sccache.gha-runners.nvidia.com" diff --git a/features/src/rapids-build-utils/install.sh b/features/src/rapids-build-utils/install.sh index d3c57bc7..7acf90f7 100644 --- a/features/src/rapids-build-utils/install.sh +++ b/features/src/rapids-build-utils/install.sh @@ -96,10 +96,13 @@ done # Install bash_completion script if command -v devcontainer-utils-generate-bash-completion >/dev/null 2>&1; then - devcontainer-utils-generate-bash-completion \ - --out-file /etc/bash_completion.d/rapids-build-utils-completions \ - ${commands[@]/#/--command rapids-} \ - ; + read -ra commands <<< "${commands[*]/#/--command rapids-}"; + if test "${#commands[@]}" -gt 0; then + devcontainer-utils-generate-bash-completion \ + --out-file /etc/bash_completion.d/rapids-build-utils-completions \ + "${commands[@]}" \ + ; + fi fi find /opt/rapids-build-utils \ diff --git a/features/src/rapids-build-utils/opt/rapids-build-utils/bin/generate-scripts.sh b/features/src/rapids-build-utils/opt/rapids-build-utils/bin/generate-scripts.sh index ba7b6d41..cf1e8c3b 100755 --- a/features/src/rapids-build-utils/opt/rapids-build-utils/bin/generate-scripts.sh +++ b/features/src/rapids-build-utils/opt/rapids-build-utils/bin/generate-scripts.sh @@ -17,12 +17,15 @@ generate_completions() { . devcontainer-utils-debug-output 'rapids_build_utils_debug' 'generate-scripts'; readarray -t commands < <(find "${TMP_SCRIPT_DIR}"/ -maxdepth 1 -type f -exec basename {} \;); - - devcontainer-utils-generate-bash-completion \ - --out-file "$(realpath -m "${COMPLETION_FILE}")" \ - --template "$(realpath -m "${COMPLETION_TMPL}")" \ - ${commands[@]/#/--command } \ - ; + read -ra commands <<< "${commands[*]/#/--command }"; + + if test "${#commands[@]}" -gt 0; then + devcontainer-utils-generate-bash-completion \ + --out-file "$(realpath -m "${COMPLETION_FILE}")" \ + --template "$(realpath -m "${COMPLETION_TMPL}")" \ + "${commands[@]}" \ + ; + fi fi } @@ -63,7 +66,7 @@ generate_script() { } generate_all_script_impl() { - local bin="${SCRIPT}-all"; + local bin="${PREFIX:-${SCRIPT}}-${SUFFIX:-all}"; if test -n "${bin:+x}" && ! 
test -f "${TMP_SCRIPT_DIR}/${bin}"; then ( cat - \ @@ -378,8 +381,18 @@ generate_scripts() { NAME="${cloned_repos[0]:-${repo_names[0]:-}}" \ NAMES="${repo_names[*]@Q}" \ SCRIPT="${script}" \ + PREFIX="${script}" \ generate_all_script ; - done; + done + for kind in "cpp" "python"; do + # Generate a script to run a type of build for all repos + NAME="${cloned_repos[0]:-${repo_names[0]:-}}" \ + NAMES="${repo_names[*]@Q}" \ + SCRIPT="${kind}.build" \ + PREFIX="build" \ + SUFFIX="all-${kind}" \ + generate_all_script ; + done fi } diff --git a/features/src/rapids-build-utils/opt/rapids-build-utils/bin/get-num-archs-jobs-and-load.sh b/features/src/rapids-build-utils/opt/rapids-build-utils/bin/get-num-archs-jobs-and-load.sh index 06439630..1809640e 100755 --- a/features/src/rapids-build-utils/opt/rapids-build-utils/bin/get-num-archs-jobs-and-load.sh +++ b/features/src/rapids-build-utils/opt/rapids-build-utils/bin/get-num-archs-jobs-and-load.sh @@ -4,10 +4,6 @@ # rapids-get-num-archs-jobs-and-load [OPTION]... # # Compute an appropriate total number of jobs, load, and CUDA archs to build in parallel. -# This routine scales the input `-j` with respect to the `-a` and `-m` values, taking into account the -# amount of available system memory (free mem + swap), in order to balance the job and arch parallelism. -# -# note: This wouldn't be necessary if `nvcc` interacted with the POSIX jobserver. # # Boolean options: # -h,--help Print this text. @@ -18,16 +14,6 @@ # -j,--parallel Run parallel compilation jobs. # --max-archs Build at most CUDA archs in parallel. # (default: 3) -# --max-total-system-memory An upper-bound on the amount of total system memory (in GiB) to use during -# C++ and CUDA device compilations. -# Smaller values yield fewer parallel C++ and CUDA device compilations. -# (default: all available memory) -# --max-device-obj-memory-usage An upper-bound on the amount of memory each CUDA device object compilation -# is expected to take. This is used to estimate the number of parallel device -# object compilations that can be launched without hitting the system memory -# limit. -# Higher values yield fewer parallel CUDA device object compilations. -# (default: 1) # shellcheck disable=SC1091 . rapids-generate-docstring; @@ -41,10 +27,9 @@ get_num_archs_jobs_and_load() { # shellcheck disable=SC1091 . devcontainer-utils-debug-output 'rapids_build_utils_debug' 'get-num-archs-jobs-and-load'; - # The return value of nproc is (who knew!) constrained by the - # values of OMP_NUM_THREADS and/or OMP_THREAD_LIMIT - # Since we want the physical number of processors here, pass --all - local -r n_cpus="$(nproc --all)"; + # nproc --all returns 2x the number of threads in Ubuntu24.04+, + # so instead we cound the number of processors in /proc/cpuinfo + local -r n_cpus="$(grep -cP 'processor\s+:' /proc/cpuinfo)"; if test ${#j[@]} -gt 0 && ! 
test -n "${j:+x}"; then j="${n_cpus}"; @@ -52,22 +37,12 @@ get_num_archs_jobs_and_load() { parallel="${j:-${JOBS:-${PARALLEL_LEVEL:-1}}}"; max_archs="${max_archs:-${MAX_DEVICE_OBJ_TO_COMPILE_IN_PARALLEL:-${arch:-}}}"; - max_device_obj_memory_usage="${max_device_obj_memory_usage:-${MAX_DEVICE_OBJ_MEMORY_USAGE:-1Gi}}"; - - local num_re="^[0-9]+$"; - - # Assume un-suffixed inputs means gibibytes - if [[ "${max_device_obj_memory_usage}" =~ ${num_re} ]]; then - max_device_obj_memory_usage="${max_device_obj_memory_usage}Gi"; - fi - - max_device_obj_memory_usage="$(numfmt --from=auto "${max_device_obj_memory_usage}")"; local n_arch="${archs:-1}"; - # currently: 70-real;75-real;80-real;86-real;90 - # see: https://github.com/rapidsai/rapids-cmake/blob/branch-24.04/rapids-cmake/cuda/set_architectures.cmake#L54 - local n_arch_rapids=5; + # currently: 70-real;75-real;80-real;86-real;90-real;100-real;120 + # see: https://github.com/rapidsai/rapids-cmake/blob/branch-25.04/rapids-cmake/cuda/set_architectures.cmake#L59 + local n_arch_rapids=7; if ! test -n "${archs:+x}" && test -n "${INFER_NUM_DEVICE_ARCHITECTURES:+x}"; then archs="$(rapids-select-cmake-define CMAKE_CUDA_ARCHITECTURES "${OPTS[@]}" || echo)"; @@ -101,31 +76,8 @@ get_num_archs_jobs_and_load() { n_arch=$((n_arch > max_archs ? max_archs : n_arch)); fi - local mem_for_device_objs="$((n_arch * max_device_obj_memory_usage))"; - local mem_total="${max_total_system_memory:-${MAX_TOTAL_SYSTEM_MEMORY:-}}"; - - if ! test -n "${mem_total:+x}"; then - local -r free_mem="$(free --bytes | grep -E '^Mem:' | tr -s '[:space:]' | cut -d' ' -f7 || echo '0')"; - local -r freeswap="$(free --bytes | grep -E '^Swap:' | tr -s '[:space:]' | cut -d' ' -f4 || echo '0')"; - mem_total="$((free_mem + freeswap))"; - # Assume un-suffixed inputs means gibibytes - elif [[ "${mem_total}" =~ ${num_re} ]]; then - mem_total="${mem_total}Gi"; - fi - mem_total="$(numfmt --from=auto "${mem_total}")"; - local n_load=$((parallel > n_cpus ? n_cpus : parallel)); - # shellcheck disable=SC2155 - local n_jobs="$( - echo " -scale=0 -max_cpu=(${n_load} / ${n_arch} / 2 * 3) -max_mem=(${mem_total} / ${mem_for_device_objs}) -if(max_cpu < max_mem) max_cpu else max_mem -" | bc - )" - n_jobs=$((n_jobs < 1 ? 1 : n_jobs)); - n_jobs=$((n_arch > 1 ? n_jobs : n_load)); + local n_jobs="$((parallel < 1 ? 1 : parallel))"; echo "declare n_arch=${n_arch}"; echo "declare n_jobs=${n_jobs}"; diff --git a/features/src/rapids-build-utils/opt/rapids-build-utils/bin/make-vscode-workspace.sh b/features/src/rapids-build-utils/opt/rapids-build-utils/bin/make-vscode-workspace.sh index 5468a1a5..e6e72924 100755 --- a/features/src/rapids-build-utils/opt/rapids-build-utils/bin/make-vscode-workspace.sh +++ b/features/src/rapids-build-utils/opt/rapids-build-utils/bin/make-vscode-workspace.sh @@ -44,7 +44,6 @@ cpp_lib_dirs() { local j=0; for ((j=0; j < ${!cpp_length:-0}; j+=1)); do - # local cpp_name="${repo}_cpp_${j}_name"; local cpp_sub_dir="${repo}_cpp_${j}_sub_dir"; echo ~/"${!repo_path:-}/${!cpp_sub_dir:-}"; done diff --git a/features/src/rapids-build-utils/opt/rapids-build-utils/bin/post-start-command.sh b/features/src/rapids-build-utils/opt/rapids-build-utils/bin/post-start-command.sh index c8fa92fd..babc2713 100755 --- a/features/src/rapids-build-utils/opt/rapids-build-utils/bin/post-start-command.sh +++ b/features/src/rapids-build-utils/opt/rapids-build-utils/bin/post-start-command.sh @@ -1,11 +1,12 @@ #!/usr/bin/env bash if ! 
test -n "${SKIP_RAPIDS_BUILD_UTILS_POST_START_COMMAND:+x}"; then - rapids-generate-scripts; - rapids-update-build-dir-links -j; - rapids-make-vscode-workspace --update; - rapids-merge-compile-commands-json > ~/compile_commands.json; + rapids-generate-scripts; + rapids-update-build-dir-links -j & + rapids-make-vscode-workspace --update & + rapids-merge-compile-commands-json > ~/compile_commands.json & if test -n "${PYTHON_PACKAGE_MANAGER:+x}"; then rapids-make-"${PYTHON_PACKAGE_MANAGER}"-env "$@" || true; fi + wait fi diff --git a/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/all.cpp.build.tmpl.sh b/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/all.cpp.build.tmpl.sh new file mode 100755 index 00000000..f5f8f52b --- /dev/null +++ b/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/all.cpp.build.tmpl.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +# Usage: +# build-all-cpp [OPTION]... +# +# Runs build--cpp for each repo in ${NAMES}. +# +# Forwards relevant arguments to each underlying script. +# +# @_include_options /usr/bin/build-${NAME}-cpp -h | tail -n+2; + +# shellcheck disable=SC1091 +. rapids-generate-docstring; + +_build_all_cpp() { + local -; + set -euo pipefail; + + eval "$(_parse_args --take '-h,--help' "$@" <&0)"; + + # shellcheck disable=SC1091 + . devcontainer-utils-debug-output 'rapids_build_utils_debug' 'build-all build-all-cpp'; + + for name in ${NAMES}; do + if command -v build-${name}-cpp >/dev/null 2>&1; then + build-${name}-cpp "${OPTS[@]}"; + fi + done +} + +_build_all_cpp "$@" <&0; diff --git a/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/all.python.build.tmpl.sh b/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/all.python.build.tmpl.sh new file mode 100755 index 00000000..3690da88 --- /dev/null +++ b/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/all.python.build.tmpl.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +# Usage: +# build-all-python [OPTION]... +# +# Runs build--python for each repo in ${NAMES}. +# +# Forwards relevant arguments to each underlying script. +# +# @_include_options /usr/bin/build-${NAME}-python -h | tail -n+2; + +# shellcheck disable=SC1091 +. rapids-generate-docstring; + +_build_all_python() { + local -; + set -euo pipefail; + + eval "$(_parse_args --take '-h,--help' "$@" <&0)"; + + # shellcheck disable=SC1091 + . 
devcontainer-utils-debug-output 'rapids_build_utils_debug' 'build-all build-all-python'; + + for name in ${NAMES}; do + if command -v build-${name}-python >/dev/null 2>&1; then + build-${name}-python "${OPTS[@]}"; + fi + done +} + +_build_all_python "$@" <&0; diff --git a/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/cpp.build.tmpl.sh b/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/cpp.build.tmpl.sh index 4c79bffa..1c130905 100755 --- a/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/cpp.build.tmpl.sh +++ b/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/cpp.build.tmpl.sh @@ -45,13 +45,12 @@ build_${CPP_LIB}_cpp() { # Build C++ lib time ( + echo "Building lib${CPP_LIB}"; export ${CPP_ENV} PATH="$PATH"; local -a cmake_build_args="($(rapids-select-cmake-build-args ${n_jobs:+-j${n_jobs}} "${OPTS[@]}"))"; cmake \ --build "${CPP_SRC}/${BIN_DIR}" \ - "${cmake_build_args[@]}" \ - -- \ - ${n_load:+-l${n_load}} ; + "${cmake_build_args[@]}" ; { set +x; } 2>/dev/null; echo -n "lib${CPP_LIB} build time:"; ) 2>&1; } diff --git a/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/cpp.configure.tmpl.sh b/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/cpp.configure.tmpl.sh index 83e659dc..7d75e7df 100755 --- a/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/cpp.configure.tmpl.sh +++ b/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/cpp.configure.tmpl.sh @@ -56,7 +56,9 @@ configure_${CPP_LIB}_cpp() { cmake_args+=(-B "${bin_dir:-${CPP_SRC}/${BIN_DIR}}"); time ( + echo "Configuring lib${CPP_LIB}"; export ${CPP_ENV} PATH="$PATH"; + SCCACHE_NO_DIST_COMPILE=1 \ CUDAFLAGS="${CUDAFLAGS:+$CUDAFLAGS }-t=${n_arch}" \ cmake "${cmake_args[@]}"; { set +x; } 2>/dev/null; echo -n "lib${CPP_LIB} configure time:"; diff --git a/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/python.build.wheel.tmpl.sh b/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/python.build.wheel.tmpl.sh index b0c35ce8..c0be79b7 100755 --- a/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/python.build.wheel.tmpl.sh +++ b/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/python.build.wheel.tmpl.sh @@ -62,10 +62,6 @@ build_${PY_LIB}_python_wheel() { ninja_args+=("-j${n_jobs}"); fi - if test -n "${n_load:+x}"; then - ninja_args+=("-l${n_load}"); - fi - local -a pip_args="( ${pip_args_+"${pip_args_[*]@Q}"} $(rapids-select-pip-wheel-args "$@") @@ -82,9 +78,27 @@ build_${PY_LIB}_python_wheel() { pip_args+=("${PY_SRC}"); + # Ensure SCCACHE_NO_DIST_COMPILE=1 is set while configuring + # so CMake's compiler tests never use the build cluster. + + if ! 
test -f /tmp/sccache_no_dist_compile.cmake; then + cat <<"EOF" > /tmp/sccache_no_dist_compile.cmake +set(ENV{SCCACHE_NO_DIST_COMPILE} "1") +EOF + fi + + # Merge with outer `-DCMAKE_PROJECT_INCLUDE_BEFORE=` if provided + local -a cmake_project_include_before="( + $(rapids-select-cmake-define CMAKE_PROJECT_INCLUDE_BEFORE "$@" || echo) + /tmp/sccache_no_dist_compile.cmake + )"; + # Join with semicolons + cmake_args+=("-DCMAKE_PROJECT_INCLUDE_BEFORE=$(IFS=";"; echo "${cmake_project_include_before[*]}")") + trap "rm -rf '${PY_SRC}/${py_lib//"-"/"_"}.egg-info'" EXIT; time ( + echo "Building ${PY_LIB} wheel"; export ${PY_ENV} PATH="$PATH"; local cudaflags="${CUDAFLAGS:+$CUDAFLAGS }-t=${n_arch}"; local build_type="$(rapids-select-cmake-build-type "${cmake_args_[@]}")"; diff --git a/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/python.install.tmpl.sh b/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/python.install.tmpl.sh index e91d2364..5c26c93f 100755 --- a/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/python.install.tmpl.sh +++ b/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/python.install.tmpl.sh @@ -60,10 +60,6 @@ install_${PY_LIB}_python() { ninja_args+=("-j${n_jobs}"); fi - if test -n "${n_load:+x}"; then - ninja_args+=("-l${n_load}"); - fi - local -a pip_args="( ${pip_args_+"${pip_args_[*]@Q}"} $(rapids-select-pip-install-args "$@") @@ -100,9 +96,27 @@ install_${PY_LIB}_python() { pip_args+=("${PY_SRC}"); fi + # Ensure SCCACHE_NO_DIST_COMPILE=1 is set while configuring + # so CMake's compiler tests never use the build cluster. + + if ! test -f /tmp/sccache_no_dist_compile.cmake; then + cat <<"EOF" > /tmp/sccache_no_dist_compile.cmake +set(ENV{SCCACHE_NO_DIST_COMPILE} "1") +EOF + fi + + # Merge with outer `-DCMAKE_PROJECT_INCLUDE_BEFORE=` if provided + local -a cmake_project_include_before="( + $(rapids-select-cmake-define CMAKE_PROJECT_INCLUDE_BEFORE "$@" || echo) + /tmp/sccache_no_dist_compile.cmake + )"; + # Join with semicolons + cmake_args+=("-DCMAKE_PROJECT_INCLUDE_BEFORE=$(IFS=";"; echo "${cmake_project_include_before[*]}")") + trap "rm -rf '${PY_SRC}/${py_lib//"-"/"_"}.egg-info'" EXIT; time ( + echo "Installing ${PY_LIB}"; export ${PY_ENV} PATH="$PATH"; local cudaflags="${CUDAFLAGS:+$CUDAFLAGS }-t=${n_arch}"; local build_type="$(rapids-select-cmake-build-type "${cmake_args_[@]}")"; diff --git a/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/repo.clone.tmpl.sh b/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/repo.clone.tmpl.sh index 0d1bdbc6..ae7a5daa 100755 --- a/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/repo.clone.tmpl.sh +++ b/features/src/rapids-build-utils/opt/rapids-build-utils/bin/tmpl/repo.clone.tmpl.sh @@ -28,6 +28,7 @@ clone_${NAME}() { --no-update-env -b,--branch -d,--directory + -q,--quiet -u,--upstream --ssh-url --https-url @@ -53,6 +54,7 @@ clone_${NAME}() { --https-url "${https_url}" \ -j ${n_jobs:-$(nproc --all --ignore=1)} \ -c checkout.defaultRemote=upstream \ + "${q[@]}" \ "${OPTS[@]}" \ -- \ "${upstream}" \ @@ -63,7 +65,7 @@ clone_${NAME}() { local upstream_branches="$(git -C "${SRC_PATH}" branch --remotes --list 'upstream/pull-request/*')"; if test -n "${upstream_branches:+x}"; then - git -C "${SRC_PATH}" branch --remotes -d ${upstream_branches}; + git -C "${SRC_PATH}" branch --remotes -d "${q[@]}" ${upstream_branches}; fi git -C "${SRC_PATH}" remote prune upstream; diff --git a/features/src/utils/devcontainer-feature.json 
b/features/src/utils/devcontainer-feature.json index ee6251c2..71b0bb21 100644 --- a/features/src/utils/devcontainer-feature.json +++ b/features/src/utils/devcontainer-feature.json @@ -1,7 +1,7 @@ { "name": "devcontainer-utils", "id": "utils", - "version": "25.6.3", + "version": "25.6.4", "description": "A feature to install RAPIDS devcontainer utility scripts", "containerEnv": { "BASH_ENV": "/etc/bash.bash_env" diff --git a/features/src/utils/install.sh b/features/src/utils/install.sh index 4a2d3107..3b9f8adf 100644 --- a/features/src/utils/install.sh +++ b/features/src/utils/install.sh @@ -14,6 +14,7 @@ PKGS=( sudo wget socat + procps tzdata gettext-base openssh-client @@ -101,9 +102,16 @@ declare -a commands_and_sources=( "init-ssh-deploy-keys ssh/init-deploy-keys.sh" "init-github-cli github/cli/init.sh" "clone-github-repo github/repo/clone.sh" + "github-user-scopes github/user/scopes.sh" "init-gitlab-cli gitlab/cli/init.sh" "clone-gitlab-repo gitlab/repo/clone.sh" "print-missing-gitlab-token-warning gitlab/print-missing-token-warning.sh" + "install-sccache sccache/install.sh" + "start-sccache sccache/start.sh" + "stop-sccache sccache/stop.sh" + "init-sccache-dist sccache/dist/init.sh" + "configure-sccache-dist sccache/dist/configure.sh" + "sccache-dist-status sccache/dist/status.sh" ) # Install alternatives @@ -117,10 +125,13 @@ done declare -a commands="($(for pair in "${commands_and_sources[@]}"; do cut -d' ' -f1 <<< "${pair}"; done))"; # Install bash_completion script -devcontainer-utils-generate-bash-completion \ - --out-file /etc/bash_completion.d/devcontainer-utils-completions \ - ${commands[@]/#/--command devcontainer-utils-} \ -; +read -ra commands <<< "${commands[*]/#/--command devcontainer-utils-}"; +if test "${#commands[@]}" -gt 0; then + devcontainer-utils-generate-bash-completion \ + --out-file /etc/bash_completion.d/devcontainer-utils-completions \ + "${commands[@]}" \ + ; +fi find /opt/devcontainer \ \( -type d -exec chmod 0775 {} \; \ diff --git a/features/src/utils/opt/devcontainer/bin/creds/s3/propagate.sh b/features/src/utils/opt/devcontainer/bin/creds/s3/propagate.sh index 1410b7e3..ec0129cd 100755 --- a/features/src/utils/opt/devcontainer/bin/creds/s3/propagate.sh +++ b/features/src/utils/opt/devcontainer/bin/creds/s3/propagate.sh @@ -13,13 +13,11 @@ _creds_s3_propagate() { local num_restarts="0"; - if test -n "$(pgrep sccache || echo)"; then - sccache --stop-server >/dev/null 2>&1 || true; - fi + devcontainer-utils-stop-sccache --kill-all; while true; do - if sccache --start-server >/dev/null 2>&1; then + if devcontainer-utils-start-sccache >/dev/null; then if [ "${num_restarts}" -gt "0" ]; then echo "Success!"; fi exit 0; fi diff --git a/features/src/utils/opt/devcontainer/bin/creds/s3/test.sh b/features/src/utils/opt/devcontainer/bin/creds/s3/test.sh index f0e07555..006fa6a3 100755 --- a/features/src/utils/opt/devcontainer/bin/creds/s3/test.sh +++ b/features/src/utils/opt/devcontainer/bin/creds/s3/test.sh @@ -28,17 +28,14 @@ _creds_s3_test() { local aws_session_token="${AWS_SESSION_TOKEN:-"$(sed -n 's/aws_session_token=//p' ~/.aws/credentials 2>/dev/null)"}"; local aws_secret_access_key="${AWS_SECRET_ACCESS_KEY:-"$(sed -n 's/aws_secret_access_key=//p' ~/.aws/credentials 2>/dev/null)"}"; - if test -n "$(pgrep sccache || echo)"; then - sccache --stop-server >/dev/null 2>&1 || true; - fi - - SCCACHE_BUCKET="${bucket:-}" \ - SCCACHE_REGION="${region:-}" \ - AWS_ACCESS_KEY_ID="${aws_access_key_id:-}" \ - AWS_SESSION_TOKEN="${aws_session_token:-}" \ + AWS_PROFILE=none 
\ + SCCACHE_BUCKET="${bucket:-}" \ + SCCACHE_REGION="${region:-}" \ + AWS_ACCESS_KEY_ID="${aws_access_key_id:-}" \ + AWS_SESSION_TOKEN="${aws_session_token:-}" \ AWS_SECRET_ACCESS_KEY="${aws_secret_access_key:-}" \ - sccache --start-server >/dev/null 2>&1; - sccache --show-stats | grep -qE 'Cache location \s+ s3'; + devcontainer-utils-start-sccache >/dev/null \ + && sccache --show-stats 2>/dev/null | grep -qE 'Cache location \s+ s3'; } _creds_s3_test "$@"; diff --git a/features/src/utils/opt/devcontainer/bin/github/cli/init.sh b/features/src/utils/opt/devcontainer/bin/github/cli/init.sh index 85cb6031..cb2076c7 100755 --- a/features/src/utils/opt/devcontainer/bin/github/cli/init.sh +++ b/features/src/utils/opt/devcontainer/bin/github/cli/init.sh @@ -1,5 +1,8 @@ #! /usr/bin/env bash +# shellcheck disable=SC1091 +. "$(dirname "$(realpath -m "${BASH_SOURCE[0]}")")/../../update-envvars.sh"; + init_github_cli() { local -; set -euo pipefail; @@ -12,7 +15,7 @@ init_github_cli() { local git_protocol="https"; local avoid_gh_cli_ssh_keygen_prompt=; - if [[ "${CODESPACES:-false}" == "true" ]]; then + if "${CODESPACES:-false}"; then git_protocol="https"; else if grep -q "You've successfully authenticated" <<< "$(ssh -T "git@${GITHUB_HOST:-github.com}" 2>&1)"; then @@ -23,67 +26,67 @@ init_github_cli() { fi fi - local -r active_scopes="$(GITHUB_TOKEN="" \ - gh api -i -X GET --silent rate_limit \ - 2>/dev/null \ - | grep -i 'x-oauth-scopes:' \ - | cut -d' ' -f1 --complement \ - | tr -d ',' \ - )"; + read -ra needed_scopes <<< "${SCCACHE_BUCKET_GH_SCOPES:-"read:org"} ${SCCACHE_DIST_GH_SCOPES:-"read:enterprise"}"; + read -ra needed_scopes <<< "${needed_scopes[*]/#/--scopes }"; - local needed_scopes="read:org"; + local -a wanted_scopes="($(devcontainer-utils-github-user-scopes "${needed_scopes[@]}"))"; + read -ra wanted_scopes <<< "${wanted_scopes[*]/#/--scopes }"; - needed_scopes="$( \ - comm -23 \ - <(echo -n "${needed_scopes}" | xargs -r -n1 -d' ' echo | sort -s) \ - <(echo -n "${active_scopes}" | xargs -r -n1 -d' ' echo | sort -s) \ - )"; + local -a needed_scopes="($(devcontainer-utils-github-user-scopes "${needed_scopes[@]}" --complement))"; + read -ra needed_scopes <<< "${needed_scopes[*]/#/--scopes }"; - if [ -n "${needed_scopes}" ]; then + if test "${#needed_scopes[@]}" -gt 0; then local VAR; for VAR in GH_TOKEN GITHUB_TOKEN; do - if [[ -n "$(eval "echo \${${VAR}:-}")" ]]; then - for ENVFILE in /etc/profile "$HOME/.bashrc"; do - if [[ "$(grep -q -E "^${VAR}=$" "$ENVFILE" >/dev/null 2>&1; echo $?)" != 0 ]]; then - echo "${VAR}=" | sudo tee -a "$ENVFILE" >/dev/null || true; - fi - done - unset ${VAR}; + if test -n "${!VAR:+x}"; then + local "_${VAR}=${!VAR}"; + unset_envvar "$VAR"; + unset "$VAR"; fi done fi - read -ra scopes <<< "${active_scopes} ${needed_scopes}"; - # shellcheck disable=SC2068 if ! gh auth status >/dev/null 2>&1; then echo "Logging into GitHub..." 
>&2; - local -r ssh_keygen="$(which ssh-keygen || echo "")"; + local -r ssh_keygen="$(which ssh-keygen 2>/dev/null || echo)"; - if [ -n "${ssh_keygen}" ] \ - && [ -n "${avoid_gh_cli_ssh_keygen_prompt}" ]; then + if test -n "${ssh_keygen:+x}" \ + && test -n "${avoid_gh_cli_ssh_keygen_prompt:+x}"; then sudo mv "${ssh_keygen}"{,.bak} || true; fi - gh auth login \ - --web --git-protocol ${git_protocol} \ + gh auth login \ + --web --git-protocol "${git_protocol}" \ --hostname "${GITHUB_HOST:-github.com}" \ - ${scopes[@]/#/--scopes } \ + "${wanted_scopes[@]}" \ || echo "Continuing without logging into GitHub"; - if [ -n "${ssh_keygen}" ] \ - && [ -n "${avoid_gh_cli_ssh_keygen_prompt}" ]; then + if test -n "${ssh_keygen:+x}" \ + && test -n "${avoid_gh_cli_ssh_keygen_prompt:+x}"; then sudo mv "${ssh_keygen}"{.bak,} || true; fi - elif [ -n "${needed_scopes}" ]; then + elif test "${#needed_scopes[@]}" -gt 0; then echo "Logging into GitHub..." >&2; - gh auth refresh \ + gh auth refresh \ --hostname "${GITHUB_HOST:-github.com}" \ - ${scopes[@]/#/--scopes } \ + "${wanted_scopes[@]}" \ || echo "Continuing without logging into GitHub"; fi + if test "${#needed_scopes[@]}" -gt 0; then + local _VAR; + for VAR in GH_TOKEN GITHUB_TOKEN; do + local _VAR="_$VAR"; + if test -n "${!_VAR:+x}"; then + export "$VAR=${!_VAR}"; + reset_envvar "$VAR"; + unset "${_VAR}" + fi + done + fi + if gh auth status >/dev/null 2>&1; then if test "$(gh config get git_protocol --host "${GITHUB_HOST:-github.com}")" != "${git_protocol}"; then gh config set git_protocol --host "${GITHUB_HOST:-github.com}" "${git_protocol}"; @@ -105,7 +108,7 @@ init_github_cli() { github_user="$(gh api user --jq '.login // ""' 2>/dev/null || echo)"; fi - export GITHUB_USER="${github_user}"; + export GITHUB_USER="${github_user:-}"; } init_github_cli "$@"; diff --git a/features/src/utils/opt/devcontainer/bin/github/user/scopes.sh b/features/src/utils/opt/devcontainer/bin/github/user/scopes.sh new file mode 100755 index 00000000..31603300 --- /dev/null +++ b/features/src/utils/opt/devcontainer/bin/github/user/scopes.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +# Usage: +# devcontainer-utils-github-user-scopes [OPTION]... +# +# Report the GitHub user's active and/or desired scopes. +# +# Boolean options: +# -h,--help Print this text. +# -c,--complement Only print needed scopes that are not active. +# +# Options that require values: +# -s,--scopes Additional authentication scopes the user should have. +# Can be specified multiple times. + +_github_user_scopes() { + local -; + set -euo pipefail; + + eval "$(devcontainer-utils-parse-args "$0" "$@" <&0)"; + + # shellcheck disable=SC1091 + . devcontainer-utils-debug-output 'devcontainer_utils_debug' 'github github-user-scopes'; + + local -a active="($(GITHUB_TOKEN="" \ + gh api -i -X GET --silent rate_limit \ + 2>/dev/null \ + | grep -i 'x-oauth-scopes:' \ + | cut -d' ' -f1 --complement \ + | tr -d ',' \ + | tr '\r' '\n' \ + | tr '\n' ' ' \ + | tr -s '[:blank:]' \ + ))" + + local -a wanted="($( \ + comm -23 \ + <(IFS=$'\n'; echo "${scopes[*]}" | sort -s) \ + <(IFS=$'\n'; echo "${active[*]}" | sort -s) \ + ))"; + + if ! 
test -n "${complement:+x}"; then + wanted=("${active[@]}" "${wanted[@]}"); + fi + + echo "${wanted[*]}"; +} + +_github_user_scopes "$@" <&0; diff --git a/features/src/utils/opt/devcontainer/bin/gitlab/print-missing-token-warning.sh b/features/src/utils/opt/devcontainer/bin/gitlab/print-missing-token-warning.sh index 38cfc814..738231c3 100755 --- a/features/src/utils/opt/devcontainer/bin/gitlab/print-missing-token-warning.sh +++ b/features/src/utils/opt/devcontainer/bin/gitlab/print-missing-token-warning.sh @@ -2,7 +2,7 @@ echo "A GitLab token is required to authenticate via GitLab CLI, but a 'GITLAB_T echo "" 1>&2; echo "Generate a token with 'api' and 'write_repository' scopes at https://${GITLAB_HOST:-gitlab.com}/-/profile/personal_access_tokens" 1>&2; -if [ "${CODESPACES:-false}" = "true" ]; then +if "${CODESPACES:-false}"; then echo "To skip this prompt in the future, add the token as a GitHub codespaces secret named 'GITLAB_TOKEN' at https://github.com/settings/codespaces." 1>&2; echo " ** Be sure to allow the repo that launched this codespace access to the new 'GITLAB_TOKEN' secret **" 1>&2; fi diff --git a/features/src/utils/opt/devcontainer/bin/post-attach-command.sh b/features/src/utils/opt/devcontainer/bin/post-attach-command.sh index 1029ef50..568da2c6 100755 --- a/features/src/utils/opt/devcontainer/bin/post-attach-command.sh +++ b/features/src/utils/opt/devcontainer/bin/post-attach-command.sh @@ -1,8 +1,28 @@ #! /usr/bin/env bash if ! test -n "${SKIP_DEVCONTAINER_UTILS_POST_ATTACH_COMMAND:+x}"; then + sudo mkdir -m 0777 -p /var/log/devcontainer-utils; + sudo touch /var/log/devcontainer-utils/creds-s3.log; + sudo chmod 0777 /var/log/devcontainer-utils/creds-s3.log; + # shellcheck disable=SC1091 . devcontainer-utils-init-git-interactive; + + # Reset sccache-dist configuration + SCCACHE_DIST_URL= devcontainer-utils-configure-sccache-dist; + + # Maybe load sccache client credentials # shellcheck disable=SC1091 . devcontainer-utils-creds-s3-init; + + # Update sccache client configuration to enable/disable sccache-dist + if test -n "${DEVCONTAINER_UTILS_ENABLE_SCCACHE_DIST:+x}"; then + devcontainer-utils-install-sccache --repo trxcllnt/sccache; + if test -n "${SCCACHE_DIST_AUTH_TOKEN:+x}"; then + devcontainer-utils-init-sccache-dist; + else + # Update ~/.config/sccache/config to use gh token auth + devcontainer-utils-init-sccache-dist --enable-with-github-auth; + fi + fi fi diff --git a/features/src/utils/opt/devcontainer/bin/sccache/dist/configure.sh b/features/src/utils/opt/devcontainer/bin/sccache/dist/configure.sh new file mode 100755 index 00000000..bef92f2f --- /dev/null +++ b/features/src/utils/opt/devcontainer/bin/sccache/dist/configure.sh @@ -0,0 +1,158 @@ +#!/usr/bin/env bash + +# Usage: +# devcontainer-utils-configure-sccache-dist [OPTION]... +# +# Generate and write sccache-dist configuration to `$HOME/.config/sccache/config` +# +# Boolean options: +# -h,--help Print this text. +# --no-local-compile-fallback Disable building locally after retrying transient sccache-dist errors. +# (default: false) +# --connection-pool Enable sccache client HTTP connection pool. +# (default: false) +# --keepalive Enable sccache client HTTP keepalive. +# +# Options that require values: +# --auth-type (token|oauth2_code_grant_pkce|oauth2_implicit) Auth type for build cluster auth. +# Disables client auth if omitted (not recommended). +# --auth-token Token used to authenticate with the build cluster when `--auth-type=token`. 
+# --client-id Client ID used to authenticate with the build cluster when `--auth-type=(oauth2_code_grant_pkce|oauth2_implicit)`. +# --auth-url Auth URL used to authenticate with the build cluster when `--auth-type=(oauth2_code_grant_pkce|oauth2_implicit)`. +# --token-url Token URL used to authenticate with the build cluster when `--auth-type=oauth2_code_grant_pkce`. +# --connect-timeout The sccache client HTTP connection timeout. +# (default: 5) +# --request-timeout The sccache client HTTP request timeout. +# (default: 600) +# --keepalive-interval The sccache HTTP keepalive interval +# (default: 20) +# --keepalive-timeout The sccache HTTP keepalive timeout +# (default: 60) +# --scheduler-url URL of the sccache-dist build cluster. +# --max-retries Maximum number of times to retry transient sccache-dist errors. +# Pass `--max-retries inf` to retry infinitely. +# Combining `--max-retries inf` with `--no-local-compile-fallback` +# ensures the sccache client relies exclusively on the build cluster. +# (default: 0) + +_configure_sccache_dist() { + local -; + set -euo pipefail; + + eval "$(devcontainer-utils-parse-args "$0" "$@" <&0)"; + + # shellcheck disable=SC1091 + . devcontainer-utils-debug-output 'devcontainer_utils_debug' 'sccache configure-sccache-dist'; + + local sccache_conf="${SCCACHE_CONF:-"$HOME/.config/sccache/config"}"; + + mkdir -p "$(dirname "$sccache_conf")"; + touch "$sccache_conf"; + + # Remove the values we're about to set. Deletes toml ranges like this: + # [dist] + # max_retries = 0 + # fallback_to_local_compile = true + # scheduler_url = "foo.bar.xyz.com" + # + # [dist.net] + # connection_pool = true + # connect_timeout = 5 + # request_timeout = 600 + # + # [dist.net.keepalive] + # enabled = true + # interval = 20 + # timeout = 60 + # + # [dist.auth] + # type = "foo" + # token = "bar" + + while grep -qE "^\[dist.*\]" "${sccache_conf}"; do + sed -Ei '/^\[dist.*\]/,/^(\s|\[)*$/d' "${sccache_conf}"; + done + + printf "%s\n" "$(< "${sccache_conf}")" > "${sccache_conf}.new" + mv "${sccache_conf}"{.new,}; + + scheduler_url="${scheduler_url:-${SCCACHE_DIST_URL-}}"; + + # Write our new values + if test -n "${scheduler_url:+x}"; then + cat <<< "" >> "${sccache_conf}"; + cat <<< "[dist]" >> "${sccache_conf}"; + cat <<< "scheduler_url = \"${scheduler_url}\"" >> "${sccache_conf}"; + if test -n "${max_retries:+x}"; then + cat <<< "max_retries = ${max_retries}" >> "${sccache_conf}"; + fi + if test -n "${no_local_compile_fallback:+x}"; then + cat <<< "fallback_to_local_compile = false" >> "${sccache_conf}"; + fi + + if test -n "${connection_pool:+x}" \ + || test -n "${connect_timeout:+x}" \ + || test -n "${request_timeout:+x}"; then + cat <<< "" >> "${sccache_conf}"; + cat <<< "[dist.net]" >> "${sccache_conf}"; + if test -n "${connection_pool:+x}"; then + cat <<< "connection_pool = true" >> "${sccache_conf}"; + fi + if test -n "${connect_timeout:+x}"; then + cat <<< "connect_timeout = "${connect_timeout}"" >> "${sccache_conf}"; + fi + if test -n "${request_timeout:+x}"; then + cat <<< "request_timeout = "${request_timeout}"" >> "${sccache_conf}"; + fi + fi + + if test -n "${keepalive:+x}"; then + cat <<< "" >> "${sccache_conf}"; + cat <<< "[dist.net.keepalive]" >> "${sccache_conf}"; + cat <<< "enabled = true" >> "${sccache_conf}"; + if test -n "${keepalive_interval:+x}"; then + cat <<< "interval = "${keepalive_interval}"" >> "${sccache_conf}"; + fi + if test -n "${keepalive_timeout:+x}"; then + cat <<< "timeout = "${keepalive_timeout}"" >> "${sccache_conf}"; + fi + fi + + case 
"${auth_type:-}" in + token) + if test -n "${auth_token:+x}"; then + cat <<< "" >> "${sccache_conf}"; + cat <<< "[dist.auth]" >> "${sccache_conf}"; + cat <<< "type = \"${auth_type}\"" >> "${sccache_conf}"; + cat <<< "token = \"${auth_token}\"" >> "${sccache_conf}"; + fi + ;; + oauth2_implicit) + if test -n "${auth_url:+x}" \ + && test -n "${client_id:+x}"; then + cat <<< "" >> "${sccache_conf}"; + cat <<< "[dist.auth]" >> "${sccache_conf}"; + cat <<< "type = \"${auth_type}\"" >> "${sccache_conf}"; + cat <<< "auth_url = \"${auth_url}\"" >> "${sccache_conf}"; + cat <<< "client_id = \"${client_id}\"" >> "${sccache_conf}"; + fi + ;; + oauth2_code_grant_pkce) + if test -n "${auth_url:+x}" \ + && test -n "${client_id:+x}" \ + && test -n "${token_url:+x}"; then + cat <<< "" >> "${sccache_conf}"; + cat <<< "[dist.auth]" >> "${sccache_conf}"; + cat <<< "type = \"${auth_type}\"" >> "${sccache_conf}"; + cat <<< "auth_url = \"${auth_url}\"" >> "${sccache_conf}"; + cat <<< "client_id = \"${client_id}\"" >> "${sccache_conf}"; + cat <<< "token_url = \"${token_url}\"" >> "${sccache_conf}"; + fi + ;; + *) + ;; + esac + fi +} + +_configure_sccache_dist "$@" <&0; diff --git a/features/src/utils/opt/devcontainer/bin/sccache/dist/init.sh b/features/src/utils/opt/devcontainer/bin/sccache/dist/init.sh new file mode 100755 index 00000000..11df3166 --- /dev/null +++ b/features/src/utils/opt/devcontainer/bin/sccache/dist/init.sh @@ -0,0 +1,86 @@ +#!/usr/bin/env bash + +# Usage: +# devcontainer-utils-init-sccache-dist [OPTION]... +# +# Generate and write sccache-dist configuration to `$HOME/.config/sccache/config`, +# then restart sccache to ensure the configuration is applied. +# +# Boolean options: +# -h,--help Print this text. +# -e,--enable-sccache-dist Enable sccache-dist. If omitted, disable sccache-dist. +# (default: false) +# --enable-with-github-auth Enable sccache-dist and configure it to use GitHub token auth. +# Shorthand for `-e --auth-type token --auth-token $(gh auth token)` +# (default: false) +# --no-local-compile-fallback Disable building locally after retrying transient sccache-dist errors. +# (default: false) +# --connection-pool Enable sccache client HTTP connection pool. +# (default: false) +# --keepalive Enable sccache client HTTP keepalive. +# +# Options that require values: +# --auth-type (token|oauth2_code_grant_pkce|oauth2_implicit) Auth type for build cluster auth. +# (default: token) +# --auth-token Token used to authenticate with the build cluster when `--auth-type=token`. +# --client-id Client ID used to authenticate with the build cluster when `--auth-type=(oauth2_code_grant_pkce|oauth2_implicit)`. +# --auth-url Auth URL used to authenticate with the build cluster when `--auth-type=(oauth2_code_grant_pkce|oauth2_implicit)`. +# --token-url Token URL used to authenticate with the build cluster when `--auth-type=oauth2_code_grant_pkce`. +# --connect-timeout The sccache client HTTP connection timeout. +# (default: 5) +# --request-timeout The sccache client HTTP request timeout. +# (default: 600) +# --keepalive-interval The sccache HTTP keepalive interval +# (default: 20) +# --keepalive-timeout The sccache HTTP keepalive timeout +# (default: 60) +# --scheduler-url URL of the sccache-dist build cluster. +# --max-retries Maximum number of times to retry transient sccache-dist errors. +# Pass `--max-retries inf` to retry infinitely. +# Combining `--max-retries inf` with `--no-local-compile-fallback` +# ensures the sccache client relies exclusively on the build cluster. 
+# (default: 0) + +_init_sccache_dist() { + local -; + set -euo pipefail; + + eval "$(devcontainer-utils-parse-args "$0" --take '-e,--enable-sccache-dist --enable-with-github-auth' "$@" <&0)"; + + # shellcheck disable=SC1091 + . devcontainer-utils-debug-output 'devcontainer_utils_debug' 'sccache init-sccache-dist'; + + while true; do + if test -n "${enable_sccache_dist:+x}"; then + # Passthrough args to configure-sccache-dist + devcontainer-utils-configure-sccache-dist - <<< "${OPTS[*]@Q}"; + break; + fi + if test -n "${enable_with_github_auth:+x}"; then + if devcontainer-utils-shell-is-interactive; then + # Initialize the GitHub CLI with the appropriate user scopes + # shellcheck disable=SC1091 + . devcontainer-utils-init-github-cli; + fi + read -ra sccache_dist_gh_scopes <<< "${SCCACHE_DIST_GH_SCOPES:-"read:enterprise"}"; + read -ra sccache_dist_gh_scopes <<< "${sccache_dist_gh_scopes[*]/#/--scopes }"; + if grep -qE "^$" <(devcontainer-utils-github-user-scopes "${sccache_dist_gh_scopes[@]}" --complement); then + devcontainer-utils-configure-sccache-dist - <<< "--auth-type token --auth-token '$(gh auth token)' ${OPTS[*]@Q}"; + break; + fi + fi + # Reset sccache-dist configuration + SCCACHE_DIST_URL= devcontainer-utils-configure-sccache-dist; + break; + done + + # Restart the sccache client with the new configuration + devcontainer-utils-start-sccache; + + # Verify sccache-dist status and configuration + if sccache --dist-status 2>/dev/null | jq -er '.SchedulerStatus? != null' >/dev/null 2>&1; then + devcontainer-utils-sccache-dist-status -c 17 -f tsv; + fi +} + +_init_sccache_dist "$@" <&0; diff --git a/features/src/utils/opt/devcontainer/bin/sccache/dist/status.sh b/features/src/utils/opt/devcontainer/bin/sccache/dist/status.sh new file mode 100755 index 00000000..050a78f0 --- /dev/null +++ b/features/src/utils/opt/devcontainer/bin/sccache/dist/status.sh @@ -0,0 +1,92 @@ +#!/usr/bin/env bash + +# Usage: +# devcontainer-utils-sccache-dist-status [OPTION]... +# +# Print and optionally format `sccache --dist-status`. +# +# Boolean options: +# -h,--help Print this text. +# +# Options that require values: +# -c|--col-width Max column width in number of characters. +# String columns wider than this will be truncated with "...". +# (default: $COLUMNS) +# -f|--format (csv|tsv|json) The `sccache --dist-status` output format. +# (default: "json") +# + +_sccache_dist_status() { + + local -; + set -euo pipefail; + + eval "$(devcontainer-utils-parse-args "$0" "$@" <&0)"; + + # shellcheck disable=SC1091 + . devcontainer-utils-debug-output 'devcontainer_utils_debug' 'sccache sccache-dist-status'; + + f="${f:-${format:-json}}"; + c="${c:-${col_width:-${COLUMNS:-1000000000}}}"; + + # Print current dist status to verify we're connected + sccache 2>/dev/null --dist-status \ + | { + # Passthrough if the format is json + if test "$f" = json; then + cat - <(echo) + else + + cat - | jq -r -f <(cat < $c) + then \$x[0:$((c-3))] + "..." + else \$x + end + ); + def info_to_row: { + time: now | floor, + type: (.type // "server"), + id: .id, + servers: (if .servers == null then "-" else (.servers | length) end), + cpus: .info.occupancy, + util: ((.info.cpu_usage // 0) * 100 | floor | . / 100 | tostring | . + "%"), + jobs: (.jobs.loading + .jobs.pending + .jobs.running), + loading: .jobs.loading, + pending: .jobs.pending, + running: .jobs.running, + accepted: .jobs.accepted, + finished: .jobs.finished, + u_time: ((.u_time // 0) | tostring | . 
+ "s") + }; + + .SchedulerStatus as [\$x, \$y] | [ + (\$y + { id: \$x, type: "scheduler", u_time: (\$y.servers // {} | map(.u_time) | min | . // "-" | tostring) }), + (\$y.servers // [] | sort_by(.id)[]) + ] + | map(info_to_row) as \$rows + | (\$rows[0] | keys_unsorted) as \$cols + | (\$rows | map(. as \$row | \$cols | map(\$row[.] | truncate_val))) as \$rows + | (\$cols | map(truncate_val)), \$rows[] | @csv +EOF +) + fi + } \ + | { + # Passthrough if the format is csv or json + # Otherwise, transform the csv into a tsv. + if test "$f" = tsv; then + if [[ "$(grep DISTRIB_RELEASE= /etc/lsb-release | cut -d= -f2)" > "20.04" ]]; then + cat - | sed 's/\"//g' | column -t -s, -R $(seq -s, 1 13) + else + cat - | sed 's/\"//g' | column -t -s, + fi + else + cat - + fi + } +} + +_sccache_dist_status "$@" <&0; diff --git a/features/src/utils/opt/devcontainer/bin/sccache/install.sh b/features/src/utils/opt/devcontainer/bin/sccache/install.sh new file mode 100755 index 00000000..4a650b86 --- /dev/null +++ b/features/src/utils/opt/devcontainer/bin/sccache/install.sh @@ -0,0 +1,111 @@ +#!/usr/bin/env bash + +# Usage: +# devcontainer-utils-install-sccache [OPTION]... +# +# Install a specific sccache client binary from GitHub releases. +# +# Boolean options: +# -h,--help Print this text. +# +# Options that require values: +# --repo The GitHub repository to use. +# (default: mozilla/sccache) +# --version The sccache version to install (e.g. `0.10.0`). +# (default: latest) +# + + +# Assign variable one scope above the caller +# Usage: local "$1" && _upvar $1 "value(s)" +# Param: $1 Variable name to assign value to +# Param: $* Value(s) to assign. If multiple values, an array is +# assigned, otherwise a single value is assigned. +# See: http://fvue.nl/wiki/Bash:_Passing_variables_by_reference +_upvar() { + if unset -v "$1"; then + if (( $# == 2 )); then + eval $1=\"\$2\"; + else + eval $1=\(\"\${@:2}\"\); + fi; + fi +} + +# Figure out correct version of a three part version number is not passed +_find_version_from_git_tags() { + local variable_name="$1" + local requested_version="${!variable_name}" + if [ "${requested_version}" = "none" ]; then return; fi + local repository="$2" + local prefix="${3:-"tags/v"}" + local separator="${4:-"."}" + local suffix="${5:-}" + local last_part_optional="${6:-"false"}" + if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then + local escaped_separator=${separator//./\\.} + local last_part= + if [ "${last_part_optional}" = "true" ]; then + last_part+="(${escaped_separator}[0-9]+)?" + last_part+="(${escaped_separator}[0-9]+)?" + if [ -n "${suffix}" ]; then + last_part+="(${suffix})?" + fi + else + last_part+="${escaped_separator}[0-9]+" + last_part+="${escaped_separator}[0-9]+" + if [ -n "${suffix}" ]; then + last_part+="(${suffix})" + fi + fi + local regex="${prefix}\\K[0-9]+${last_part}$" + local remote_upstream_fetch="$(git --no-pager config get remote.upstream.fetch)"; + if test -n "${remote_upstream_fetch:+x}"; then + git config unset --global remote.upstream.fetch + fi + local -r version_list="$(git ls-remote --tags "${repository}" | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." 
| sort -rV)" + if test -n "${remote_upstream_fetch:+x}"; then + git config set --global remote.upstream.fetch "${remote_upstream_fetch}"; + fi + if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then + _upvar "${variable_name}" "$(head -n 1 <<< "${version_list}")" + else + set +e + _upvar "${variable_name}" "$(grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)" <<< "${version_list}")" + set -e + fi + fi + if [ -z "${!variable_name}" ] || ! grep "^${!variable_name//./\\.}$" <<< "${version_list}" > /dev/null 2>&1; then + echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2 + return 1 + fi +} + +_install_sccache() { + local -; + set -euo pipefail; + + eval "$(devcontainer-utils-parse-args "$0" "$@" <&0)"; + + # shellcheck disable=SC1091 + . devcontainer-utils-debug-output 'devcontainer_utils_debug' 'sccache install-sccache'; + + local sccache_version="${version:-"latest"}"; + local github_repo="${repo:-"mozilla/sccache"}"; + + _find_version_from_git_tags sccache_version "https://github.com/$github_repo" "" "" "-.*" "true"; + + while pgrep sccache >/dev/null 2>&1; do + devcontainer-utils-stop-sccache --kill-all; + done + + # Install sccache + wget --no-hsts -q -O- \ + "https://github.com/$github_repo/releases/download/v$sccache_version/sccache-v$sccache_version-$(uname -m)-unknown-linux-musl.tar.gz" \ + | sudo tar -C /usr/bin -zf - --wildcards --strip-components=1 -x '*/sccache' \ + && sudo chmod +x /usr/bin/sccache; + + echo "Installed sccache v$(sccache --version | cut -d' ' -f2) to $(which sccache)" +} + +_install_sccache "$@" <&0; diff --git a/features/src/utils/opt/devcontainer/bin/sccache/start.sh b/features/src/utils/opt/devcontainer/bin/sccache/start.sh new file mode 100755 index 00000000..905a5286 --- /dev/null +++ b/features/src/utils/opt/devcontainer/bin/sccache/start.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash + +# Usage: +# devcontainer-utils-start-sccache [OPTION]... +# +# Start the sccache server in the foreground or background. +# +# Boolean options: +# -h,--help Print this text. +# -f,--foreground Start the sccache server in the foreground. +# +# Options that require values: +# -p,--port Start the sccache server on . +# If there is already a server listening +# on this port, it will be killed first. +# (default: ${SCCACHE_SERVER_PORT:-4226}) +# + +_start_sccache() { + local -; + set -euo pipefail; + + eval "$(devcontainer-utils-parse-args "$0" "$@" <&0)"; + + # shellcheck disable=SC1091 + . 
devcontainer-utils-debug-output 'devcontainer_utils_debug' 'sccache start-sccache'; + + local sccache_port="${p:-${port:-${SCCACHE_SERVER_PORT:-4226}}}"; + local pidfile="/tmp/sccache.${sccache_port}.pid"; + + # Stop any existing server + devcontainer-utils-stop-sccache -p "${sccache_port}"; + + local logfile="${SCCACHE_ERROR_LOG:-/tmp/sccache.log}"; + local log_lvl="${SCCACHE_LOG:-${SCCACHE_SERVER_LOG-}}"; + + logfile="$(dirname "$logfile")/$(basename -s .log "$logfile").${sccache_port}.log"; + + if test -n "${f:-${foreground:+x}}"; then + # Unset this so sccache outputs to stderr + unset SCCACHE_ERROR_LOG; + # Increase the open file limit so users can do `make -j1024` + ulimit -n "$(ulimit -Hn)"; + # Start the sccache server in the foreground + RUST_LOG_STYLE="always" \ + SCCACHE_NO_DAEMON=1 \ + SCCACHE_START_SERVER=1 \ + SCCACHE_SERVER_LOG="${log_lvl}" \ + SCCACHE_SERVER_PORT="${sccache_port}" \ + sccache 2>&1 \ + | tee >(cat <(echo "=== sccache logfile: $logfile ===") \ + <(cat - ) \ + >&2) \ + >(sed -u 's/\x1B\[[0-9;]*[JKmsu]//g' >"$logfile") \ + >/dev/null ; + else + # Start the sccache server in the background + RUST_LOG_STYLE="never" \ + SCCACHE_ERROR_LOG="${logfile}" \ + SCCACHE_SERVER_LOG="${log_lvl}" \ + SCCACHE_SERVER_PORT="${sccache_port}" \ + sccache --start-server 2>/dev/null \ + | tee "$logfile"; + # Write the pid to the pidfile + pgrep sccache | sort -n | head -n1 | tee "${pidfile}" >/dev/null; + # Increase the open file limit so users can do `make -j1024` + prlimit --nofile=$(ulimit -Hn):$(ulimit -Hn) --pid "$(cat "${pidfile}")"; + echo "=== sccache logfile: $logfile ===" >&2; + echo "=== sccache pidfile: $pidfile ===" >&2; + fi +} + +_start_sccache "$@" <&0; diff --git a/features/src/utils/opt/devcontainer/bin/sccache/stop.sh b/features/src/utils/opt/devcontainer/bin/sccache/stop.sh new file mode 100755 index 00000000..39c54a81 --- /dev/null +++ b/features/src/utils/opt/devcontainer/bin/sccache/stop.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash + +# Usage: +# devcontainer-utils-stop-sccache [OPTION]... +# +# Gracefully or forcefully stop the sccache server. +# +# Boolean options: +# -h,--help Print this text. +# -k,--kill SIGKILL the sccache server. +# -a,--kill-all SIGKILL all sccache processes. +# +# Options that require values: +# -p,--port Stop the sccache server on . +# (default: ${SCCACHE_SERVER_PORT:-4226}) +# + +_stop_sccache() { + local -; + set -euo pipefail; + + eval "$(devcontainer-utils-parse-args "$0" "$@" <&0)"; + + # shellcheck disable=SC1091 + . devcontainer-utils-debug-output 'devcontainer_utils_debug' 'sccache stop-sccache'; + + if ! 
pgrep sccache >/dev/null 2>&1; then + return 0; + fi + + local sccache_port="${p:-${port:-${SCCACHE_SERVER_PORT:-4226}}}"; + local pidfile="/tmp/sccache.${sccache_port}.pid"; + + if test -n "${a:-${kill_all:+x}}"; then + # Shutdown all sccache processes forcefully + pkill -9 sccache >/dev/null 2>&1 || true; + elif test -n "${k:-${kill:+x}}" && test -f "${pidfile}"; then + # Shutdown the sccache process on `$sccache_port` forcefully + pkill -9 --pidfile "${pidfile}" >/dev/null 2>&1 || true; + else + # Shutdown gracefully + SCCACHE_SERVER_PORT="${sccache_port}" \ + sccache --stop-server >/dev/null 2>&1 || true; + if test -f "${pidfile}"; then + # Wait for the server to shutdown + if command -v pidwait >/dev/null 2>&1; then + pidwait --pidfile "${pidfile}" >/dev/null 2>&1 || true; + else + while IFS= read -r pid; do + if test -n "${pid:+x}"; then + while test -e "/proc/${pid}"; do + sleep 0.1; + done + fi + done < "${pidfile}" + fi + fi + fi + + rm -f "${pidfile}" 2>/dev/null || true; +} + +_stop_sccache "$@" <&0; diff --git a/features/src/utils/opt/devcontainer/bin/shell-is-interactive.sh b/features/src/utils/opt/devcontainer/bin/shell-is-interactive.sh index c466a2f1..3a923566 100755 --- a/features/src/utils/opt/devcontainer/bin/shell-is-interactive.sh +++ b/features/src/utils/opt/devcontainer/bin/shell-is-interactive.sh @@ -1,6 +1,6 @@ #! /usr/bin/env bash -if tty >/dev/null 2>&1 && (exec /dev/null 2>&1 && (exec /dev/null 2>&1; do + devcontainer-utils-stop-sccache --kill-all; + done + + sudo rm -rf \ + ~/.aws/ \ + ~/.config/gh/ \ + ~/.config/sccache/config \ + /var/log/devcontainer-utils/creds-s3.log; cp /tmp/.bashrc-clean ~/.bashrc; + sudo cp /usr/bin/sccache{.orig,}; echo "#! /usr/bin/env bash" | sudo tee "${utils_profile_script}" >/dev/null; sudo chmod +x "${utils_profile_script}"; . 
diff --git a/features/src/utils/opt/devcontainer/bin/shell-is-interactive.sh b/features/src/utils/opt/devcontainer/bin/shell-is-interactive.sh
index c466a2f1..3a923566 100755
--- a/features/src/utils/opt/devcontainer/bin/shell-is-interactive.sh
+++ b/features/src/utils/opt/devcontainer/bin/shell-is-interactive.sh
@@ -1,6 +1,6 @@
 #! /usr/bin/env bash
-if tty >/dev/null 2>&1 && (exec /dev/null 2>&1 && (exec /dev/null 2>&1; do
+        devcontainer-utils-stop-sccache --kill-all;
+    done
+
+    sudo rm -rf \
+        ~/.aws/ \
+        ~/.config/gh/ \
+        ~/.config/sccache/config \
+        /var/log/devcontainer-utils/creds-s3.log;
 
     cp /tmp/.bashrc-clean ~/.bashrc;
+    sudo cp /usr/bin/sccache{.orig,};
 
     echo "#! /usr/bin/env bash" | sudo tee "${utils_profile_script}" >/dev/null;
     sudo chmod +x "${utils_profile_script}";
 
     . ~/.bashrc;
-
-    if test -n "$(pgrep sccache || echo)"; then
-        sccache --stop-server >/dev/null 2>&1 || true;
-    fi
 }
 
 write_bad_creds() {
-    devcontainer-utils-vault-s3-creds-persist <<< "
+    devcontainer-utils-creds-s3-persist <<< "
         --stamp='$(date '+%s')'
         --bucket='${rw_sccache_bucket:-}'
         --region='${rw_sccache_region:-}'
@@ -83,6 +89,14 @@ expect_local_disk_cache_is_used() {
     grep -qE 'Cache location \s+ Local disk' <<< "${stats}";
 }
 
+expect_sccache_dist_auth_token_is_gh_token() {
+    set -e;
+    test -f ~/.config/sccache/config;
+    grep -qE "[dist.auth]" ~/.config/sccache/config;
+    grep -qE "type = \"token\"" ~/.config/sccache/config;
+    grep -qE "token = \"${gh_token}\"" ~/.config/sccache/config;
+}
+
 if test -n "${vault_host:+x}" \
 && test -n "${rw_sccache_bucket:+x}"; then
 
@@ -245,6 +259,45 @@ if test -n "${gh_token:+x}" \
     check "bad stored creds with GH_TOKEN, AWS_ROLE_ARN, and SCCACHE_BUCKET should regenerate credentials" bad_stored_creds_with_GH_TOKEN_AWS_ROLE_ARN_and_SCCACHE_BUCKET_should_regenerate_credentials;
 fi
 
+if test -n "${gh_token:-}" \
+&& test -n "${sccache_dist_scheduler_url:-}"; then
+
+    does_not_set_sccache_dist_auth_config() {
+        reset_state;
+        DEVCONTAINER_UTILS_ENABLE_SCCACHE_DIST=1 \
+        SCCACHE_DIST_URL="${sccache_dist_scheduler_url}" \
+        devcontainer-utils-post-attach-command;
+        if expect_sccache_dist_auth_token_is_gh_token; then
+            return 1;
+        fi
+    }
+
+    check "does not configure sccache-dist auth when no GH_TOKEN or SCCACHE_DIST_AUTH_TOKEN" does_not_set_sccache_dist_auth_config;
+
+    uses_GH_TOKEN_as_sccache_dist_auth_token() {
+        reset_state;
+        GH_TOKEN="${gh_token}" \
+        DEVCONTAINER_UTILS_ENABLE_SCCACHE_DIST=1 \
+        SCCACHE_DIST_URL="${sccache_dist_scheduler_url}" \
+        devcontainer-utils-post-attach-command;
+        expect_sccache_dist_auth_token_is_gh_token;
+    }
+
+    check "configures sccache-dist to use GH_TOKEN as auth token" uses_GH_TOKEN_as_sccache_dist_auth_token;
+
+    # uses_SCCACHE_DIST_AUTH_TOKEN_as_sccache_dist_auth_token() {
+    #     reset_state;
+    #     SCCACHE_DIST_AUTH_TYPE=token \
+    #     SCCACHE_DIST_AUTH_TOKEN="${gh_token}" \
+    #     DEVCONTAINER_UTILS_ENABLE_SCCACHE_DIST=1 \
+    #     SCCACHE_DIST_URL="${sccache_dist_scheduler_url}" \
+    #     devcontainer-utils-post-attach-command;
+    #     expect_sccache_dist_auth_token_is_gh_token;
+    # }
+
+    # check "configures sccache-dist to use SCCACHE_DIST_AUTH_TOKEN as auth token" uses_SCCACHE_DIST_AUTH_TOKEN_as_sccache_dist_auth_token;
+fi
+
 # Report result
 # If any of the checks above exited with a non-zero exit code, the test will fail.
 reportResults
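The expect_sccache_dist_auth_token_is_gh_token assertion above implies roughly the following client config. This is a sketch of what devcontainer-utils-post-attach-command is expected to leave behind; the [dist] scheduler_url key is assumed from sccache's client-config layout, and the values are placeholders:

# Expected shape of ~/.config/sccache/config after the GH_TOKEN scenario (sketch):
cat ~/.config/sccache/config;
# [dist]
# scheduler_url = "<sccache_dist_scheduler_url>"
#
# [dist.auth]
# type = "token"
# token = "<GH_TOKEN>"

Note that grep -qE "[dist.auth]" treats the unescaped brackets as a character class, so it matches any line containing one of those characters; escaping them (grep -qE "\[dist\.auth\]") would make the assertion stricter.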
diff --git a/features/test/utils/ubuntu20.04.sh b/features/test/utils/ubuntu20.04.sh
index d260ea90..866cad28 100644
--- a/features/test/utils/ubuntu20.04.sh
+++ b/features/test/utils/ubuntu20.04.sh
@@ -18,6 +18,7 @@ source dev-container-features-test-lib
 
 export VAULT_S3_TTL="${VAULT_S3_TTL:-"900"}";
 cp ~/.bashrc /tmp/.bashrc-clean;
+sudo cp /usr/bin/sccache{,.orig};
 
 utils_profile_script="$(find /etc/profile.d/ -type f -name '*-devcontainer-utils.sh')";
 
@@ -49,19 +50,24 @@ reset_state() {
     unset AWS_SESSION_TOKEN;
     unset AWS_SECRET_ACCESS_KEY;
 
-    rm -rf ~/.aws/ ~/.config/gh/;
+    while pgrep sccache >/dev/null 2>&1; do
+        devcontainer-utils-stop-sccache --kill-all;
+    done
+
+    sudo rm -rf \
+        ~/.aws/ \
+        ~/.config/gh/ \
+        ~/.config/sccache/config \
+        /var/log/devcontainer-utils/creds-s3.log;
 
     cp /tmp/.bashrc-clean ~/.bashrc;
+    sudo cp /usr/bin/sccache{.orig,};
 
     echo "#! /usr/bin/env bash" | sudo tee "${utils_profile_script}" >/dev/null;
     sudo chmod +x "${utils_profile_script}";
 
     . ~/.bashrc;
-
-    if test -n "$(pgrep sccache || echo)"; then
-        sccache --stop-server >/dev/null 2>&1 || true;
-    fi
 }
 
 write_bad_creds() {
-    devcontainer-utils-vault-s3-creds-persist <<< "
+    devcontainer-utils-creds-s3-persist <<< "
         --stamp='$(date '+%s')'
         --bucket='${rw_sccache_bucket:-}'
         --region='${rw_sccache_region:-}'
@@ -83,6 +89,14 @@ expect_local_disk_cache_is_used() {
     grep -qE 'Cache location \s+ Local disk' <<< "${stats}";
 }
 
+expect_sccache_dist_auth_token_is_gh_token() {
+    set -e;
+    test -f ~/.config/sccache/config;
+    grep -qE "[dist.auth]" ~/.config/sccache/config;
+    grep -qE "type = \"token\"" ~/.config/sccache/config;
+    grep -qE "token = \"${gh_token}\"" ~/.config/sccache/config;
+}
+
 if test -n "${vault_host:+x}" \
 && test -n "${rw_sccache_bucket:+x}"; then
 
@@ -245,6 +259,45 @@ if test -n "${gh_token:+x}" \
     check "bad stored creds with GH_TOKEN, AWS_ROLE_ARN, and SCCACHE_BUCKET should regenerate credentials" bad_stored_creds_with_GH_TOKEN_AWS_ROLE_ARN_and_SCCACHE_BUCKET_should_regenerate_credentials;
 fi
 
+if test -n "${gh_token:-}" \
+&& test -n "${sccache_dist_scheduler_url:-}"; then
+
+    does_not_set_sccache_dist_auth_config() {
+        reset_state;
+        DEVCONTAINER_UTILS_ENABLE_SCCACHE_DIST=1 \
+        SCCACHE_DIST_URL="${sccache_dist_scheduler_url}" \
+        devcontainer-utils-post-attach-command;
+        if expect_sccache_dist_auth_token_is_gh_token; then
+            return 1;
+        fi
+    }
+
+    check "does not configure sccache-dist auth when no GH_TOKEN or SCCACHE_DIST_AUTH_TOKEN" does_not_set_sccache_dist_auth_config;
+
+    uses_GH_TOKEN_as_sccache_dist_auth_token() {
+        reset_state;
+        GH_TOKEN="${gh_token}" \
+        DEVCONTAINER_UTILS_ENABLE_SCCACHE_DIST=1 \
+        SCCACHE_DIST_URL="${sccache_dist_scheduler_url}" \
+        devcontainer-utils-post-attach-command;
+        expect_sccache_dist_auth_token_is_gh_token;
+    }
+
+    check "configures sccache-dist to use GH_TOKEN as auth token" uses_GH_TOKEN_as_sccache_dist_auth_token;
+
+    # uses_SCCACHE_DIST_AUTH_TOKEN_as_sccache_dist_auth_token() {
+    #     reset_state;
+    #     SCCACHE_DIST_AUTH_TYPE=token \
+    #     SCCACHE_DIST_AUTH_TOKEN="${gh_token}" \
+    #     DEVCONTAINER_UTILS_ENABLE_SCCACHE_DIST=1 \
+    #     SCCACHE_DIST_URL="${sccache_dist_scheduler_url}" \
+    #     devcontainer-utils-post-attach-command;
+    #     expect_sccache_dist_auth_token_is_gh_token;
+    # }
+
+    # check "configures sccache-dist to use SCCACHE_DIST_AUTH_TOKEN as auth token" uses_SCCACHE_DIST_AUTH_TOKEN_as_sccache_dist_auth_token;
+fi
+
 # Report result
 # If any of the checks above exited with a non-zero exit code, the test will fail.
 reportResults
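For reference, the GH_TOKEN scenario exercised above can also be reproduced by hand inside the container. The values below are placeholders, and the --dist-status call assumes an sccache client new enough to support distributed compilation:

# Hypothetical manual reproduction of the "uses GH_TOKEN as auth token" scenario
GH_TOKEN="<token>" \
DEVCONTAINER_UTILS_ENABLE_SCCACHE_DIST=1 \
SCCACHE_DIST_URL="<scheduler url>" \
devcontainer-utils-post-attach-command;

# Confirm the client can reach the scheduler it was just configured against
sccache --dist-status;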
diff --git a/features/test/utils/ubuntu22.04.sh b/features/test/utils/ubuntu22.04.sh
index d260ea90..866cad28 100644
--- a/features/test/utils/ubuntu22.04.sh
+++ b/features/test/utils/ubuntu22.04.sh
@@ -18,6 +18,7 @@ source dev-container-features-test-lib
 
 export VAULT_S3_TTL="${VAULT_S3_TTL:-"900"}";
 cp ~/.bashrc /tmp/.bashrc-clean;
+sudo cp /usr/bin/sccache{,.orig};
 
 utils_profile_script="$(find /etc/profile.d/ -type f -name '*-devcontainer-utils.sh')";
 
@@ -49,19 +50,24 @@ reset_state() {
     unset AWS_SESSION_TOKEN;
     unset AWS_SECRET_ACCESS_KEY;
 
-    rm -rf ~/.aws/ ~/.config/gh/;
+    while pgrep sccache >/dev/null 2>&1; do
+        devcontainer-utils-stop-sccache --kill-all;
+    done
+
+    sudo rm -rf \
+        ~/.aws/ \
+        ~/.config/gh/ \
+        ~/.config/sccache/config \
+        /var/log/devcontainer-utils/creds-s3.log;
 
     cp /tmp/.bashrc-clean ~/.bashrc;
+    sudo cp /usr/bin/sccache{.orig,};
 
     echo "#! /usr/bin/env bash" | sudo tee "${utils_profile_script}" >/dev/null;
     sudo chmod +x "${utils_profile_script}";
 
     . ~/.bashrc;
-
-    if test -n "$(pgrep sccache || echo)"; then
-        sccache --stop-server >/dev/null 2>&1 || true;
-    fi
 }
 
 write_bad_creds() {
-    devcontainer-utils-vault-s3-creds-persist <<< "
+    devcontainer-utils-creds-s3-persist <<< "
         --stamp='$(date '+%s')'
         --bucket='${rw_sccache_bucket:-}'
         --region='${rw_sccache_region:-}'
@@ -83,6 +89,14 @@ expect_local_disk_cache_is_used() {
     grep -qE 'Cache location \s+ Local disk' <<< "${stats}";
 }
 
+expect_sccache_dist_auth_token_is_gh_token() {
+    set -e;
+    test -f ~/.config/sccache/config;
+    grep -qE "[dist.auth]" ~/.config/sccache/config;
+    grep -qE "type = \"token\"" ~/.config/sccache/config;
+    grep -qE "token = \"${gh_token}\"" ~/.config/sccache/config;
+}
+
 if test -n "${vault_host:+x}" \
 && test -n "${rw_sccache_bucket:+x}"; then
 
@@ -245,6 +259,45 @@ if test -n "${gh_token:+x}" \
     check "bad stored creds with GH_TOKEN, AWS_ROLE_ARN, and SCCACHE_BUCKET should regenerate credentials" bad_stored_creds_with_GH_TOKEN_AWS_ROLE_ARN_and_SCCACHE_BUCKET_should_regenerate_credentials;
 fi
 
+if test -n "${gh_token:-}" \
+&& test -n "${sccache_dist_scheduler_url:-}"; then
+
+    does_not_set_sccache_dist_auth_config() {
+        reset_state;
+        DEVCONTAINER_UTILS_ENABLE_SCCACHE_DIST=1 \
+        SCCACHE_DIST_URL="${sccache_dist_scheduler_url}" \
+        devcontainer-utils-post-attach-command;
+        if expect_sccache_dist_auth_token_is_gh_token; then
+            return 1;
+        fi
+    }
+
+    check "does not configure sccache-dist auth when no GH_TOKEN or SCCACHE_DIST_AUTH_TOKEN" does_not_set_sccache_dist_auth_config;
+
+    uses_GH_TOKEN_as_sccache_dist_auth_token() {
+        reset_state;
+        GH_TOKEN="${gh_token}" \
+        DEVCONTAINER_UTILS_ENABLE_SCCACHE_DIST=1 \
+        SCCACHE_DIST_URL="${sccache_dist_scheduler_url}" \
+        devcontainer-utils-post-attach-command;
+        expect_sccache_dist_auth_token_is_gh_token;
+    }
+
+    check "configures sccache-dist to use GH_TOKEN as auth token" uses_GH_TOKEN_as_sccache_dist_auth_token;
+
+    # uses_SCCACHE_DIST_AUTH_TOKEN_as_sccache_dist_auth_token() {
+    #     reset_state;
+    #     SCCACHE_DIST_AUTH_TYPE=token \
+    #     SCCACHE_DIST_AUTH_TOKEN="${gh_token}" \
+    #     DEVCONTAINER_UTILS_ENABLE_SCCACHE_DIST=1 \
+    #     SCCACHE_DIST_URL="${sccache_dist_scheduler_url}" \
+    #     devcontainer-utils-post-attach-command;
+    #     expect_sccache_dist_auth_token_is_gh_token;
+    # }
+
+    # check "configures sccache-dist to use SCCACHE_DIST_AUTH_TOKEN as auth token" uses_SCCACHE_DIST_AUTH_TOKEN_as_sccache_dist_auth_token;
+fi
+
 # Report result
 # If any of the checks above exited with a non-zero exit code, the test will fail.
 reportResults
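As context for the test files above: check and reportResults come from the devcontainers CLI feature-test library sourced at the top of each script. A minimal sketch of that harness shape (not a new test added by this PR):

source dev-container-features-test-lib;

my_scenario() {
    reset_state;
    # exercise the feature under test here
}

# `check` runs the scenario and records its name and exit status...
check "my scenario" my_scenario;

# ...and `reportResults` fails the whole test run if any recorded check failed.
reportResults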