diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 92c387ea6ea..1e44f80b3a7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -65,3 +65,8 @@ repos: hooks: - id: rapids-dependency-file-generator args: ["--clean"] + - repo: https://github.com/shellcheck-py/shellcheck-py + rev: v0.10.0.1 + hooks: + - id: shellcheck + args: ["--severity=warning"] diff --git a/benchmarks/cugraph/standalone/run_all_nightly_benches.sh b/benchmarks/cugraph/standalone/run_all_nightly_benches.sh index 38d1c496991..46ee2d724da 100644 --- a/benchmarks/cugraph/standalone/run_all_nightly_benches.sh +++ b/benchmarks/cugraph/standalone/run_all_nightly_benches.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) 2021-2022, NVIDIA CORPORATION. +# Copyright (c) 2021-2025, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -THIS_SCRIPT_DIR=$(cd $(dirname ${BASH_SOURCE[0]}) && pwd) +THIS_SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) #WEIGHTED_ALGOS="--algo=bfs --algo=sssp" #UNWEIGHTED_ALGOS="--algo=wcc" @@ -34,7 +34,7 @@ for scale in $SCALE_VALUES; do echo "" echo ">>>>>>>>>>>>>>>>> EDGEFACTOR: $edgefactor" #env CUDA_VISIBLE_DEVICES="$gpus" python "$THIS_SCRIPT_DIR"/main.py $WEIGHTED_ALGOS --scale=$scale --symmetric-graph - env CUDA_VISIBLE_DEVICES="$gpus" python "$THIS_SCRIPT_DIR"/main.py $UNWEIGHTED_ALGOS --unweighted --symmetric-graph --scale=$scale --edgefactor=$edgefactor + env CUDA_VISIBLE_DEVICES="$gpus" python "$THIS_SCRIPT_DIR"/main.py "$UNWEIGHTED_ALGOS" --unweighted --symmetric-graph --scale="$scale" --edgefactor="$edgefactor" done done mv out.csv random_scale_"$scale".csv diff --git a/benchmarks/shared/build_cugraph_ucx/build-ucx.sh b/benchmarks/shared/build_cugraph_ucx/build-ucx.sh index df9290cdfe6..2bdead404c7 100644 --- a/benchmarks/shared/build_cugraph_ucx/build-ucx.sh +++ b/benchmarks/shared/build_cugraph_ucx/build-ucx.sh @@ -1,19 +1,19 @@ #!/bin/bash -# Copyright (c) 2023-2024, NVIDIA CORPORATION. +# Copyright (c) 2023-2025, NVIDIA CORPORATION. # SPDX-License-Identifier: Apache-2.0 set -ex UCX_VERSION_TAG=${1:-"v1.14.x"} CUDA_HOME=${2:-"/usr/local/cuda"} # Send any remaining arguments to configure -CONFIGURE_ARGS=${@:2} +CONFIGURE_ARGS=("${@:2}") git clone https://github.com/openucx/ucx.git cd ucx -git checkout ${UCX_VERSION_TAG} +git checkout "${UCX_VERSION_TAG}" ./autogen.sh mkdir build-linux && cd build-linux -../contrib/configure-release --prefix=${CONDA_PREFIX} --with-sysroot --enable-cma \ +../contrib/configure-release --prefix="${CONDA_PREFIX}" --with-sysroot --enable-cma \ --enable-mt --enable-numa --with-gnu-ld --with-rdmacm --with-verbs \ - --with-cuda=${CUDA_HOME} \ - ${CONFIGURE_ARGS} + --with-cuda="${CUDA_HOME}" \ + "${CONFIGURE_ARGS[@]}" make -j install diff --git a/build.sh b/build.sh index ebdc6ed5c5c..296208f9a6a 100755 --- a/build.sh +++ b/build.sh @@ -16,9 +16,7 @@ ARGS=$* # NOTE: ensure all dir changes are relative to the location of this # script, and that this script resides in the repo dir! 
-REPODIR=$(cd $(dirname $0); pwd) - -RAPIDS_VERSION="$(sed -E -e 's/^([0-9]{2})\.([0-9]{2})\.([0-9]{2}).*$/\1.\2/' VERSION)" +REPODIR=$(cd "$(dirname "$0")"; pwd) # Valid args to this script (all possible targets and options) - only one per line VALIDARGS=" @@ -88,40 +86,40 @@ BUILD_DIRS="${LIBCUGRAPH_BUILD_DIR} # Set defaults for vars modified by flags to this script VERBOSE_FLAG="" -CMAKE_VERBOSE_OPTION="" +CMAKE_VERBOSE_OPTION=() BUILD_TYPE=Release -INSTALL_TARGET="--target install" +INSTALL_TARGET=(--target install) BUILD_CPP_TESTS=ON BUILD_CPP_MG_TESTS=OFF BUILD_CPP_MTMG_TESTS=OFF BUILD_ALL_GPU_ARCH=0 -CMAKE_GENERATOR_OPTION="-G Ninja" -PYTHON_ARGS_FOR_INSTALL="-m pip install --no-build-isolation --no-deps --config-settings rapidsai.disable-cuda=true" +CMAKE_GENERATOR_OPTION=(-G Ninja) +PYTHON_ARGS_FOR_INSTALL=(-m pip install --no-build-isolation --no-deps --config-settings rapidsai.disable-cuda=true) # Set defaults for vars that may not have been defined externally # FIXME: if PREFIX is not set, check CONDA_PREFIX, but there is no fallback # from there! INSTALL_PREFIX=${PREFIX:=${CONDA_PREFIX}} -PARALLEL_LEVEL=${PARALLEL_LEVEL:=`nproc`} +PARALLEL_LEVEL=${PARALLEL_LEVEL:=$(nproc)} BUILD_ABI=${BUILD_ABI:=ON} function hasArg { - (( ${NUMARGS} != 0 )) && (echo " ${ARGS} " | grep -q " $1 ") + (( NUMARGS != 0 )) && (echo " ${ARGS} " | grep -q " $1 ") } function buildDefault { - (( ${NUMARGS} == 0 )) || !(echo " ${ARGS} " | grep -q " [^-][a-zA-Z0-9\_\-]\+ ") + (( NUMARGS == 0 )) || ! (echo " ${ARGS} " | grep -q " [^-][a-zA-Z0-9\_\-]\+ ") } function cleanPythonDir { - pushd $1 > /dev/null - rm -rf dist dask-worker-space cugraph/raft *.egg-info - find . -type d -name __pycache__ -print | xargs rm -rf - find . -type d -name build -print | xargs rm -rf - find . -type d -name dist -print | xargs rm -rf + pushd "$1" > /dev/null + rm -rf dist dask-worker-space cugraph/raft ./*.egg-info + find . -type d -name __pycache__ -print0 | xargs -0 rm -rf + find . -type d -name build -print0 | xargs -0 rm -rf + find . -type d -name dist -print0 | xargs -0 rm -rf find . -type f -name "*.cpp" -delete find . -type f -name "*.cpython*.so" -delete - find . -type d -name _external_repositories -print | xargs rm -rf + find . -type d -name _external_repositories -print0 | xargs -0 rm -rf popd > /dev/null } @@ -131,7 +129,7 @@ if hasArg -h || hasArg --help; then fi # Check for valid usage -if (( ${NUMARGS} != 0 )); then +if (( NUMARGS != 0 )); then for a in ${ARGS}; do if ! 
(echo "${VALIDARGS}" | grep -q "^[[:blank:]]*${a}$"); then echo "Invalid option: ${a}" @@ -143,13 +141,13 @@ fi # Process flags if hasArg -v; then VERBOSE_FLAG="-v" - CMAKE_VERBOSE_OPTION="--log-level=VERBOSE" + CMAKE_VERBOSE_OPTION=(--log-level=VERBOSE) fi if hasArg -g; then BUILD_TYPE=Debug fi if hasArg -n; then - INSTALL_TARGET="" + INSTALL_TARGET=() fi if hasArg --allgpuarch; then BUILD_ALL_GPU_ARCH=1 @@ -164,33 +162,33 @@ if hasArg cpp-mgtests || hasArg all; then BUILD_CPP_MG_TESTS=ON fi if hasArg --cmake_default_generator; then - CMAKE_GENERATOR_OPTION="" + CMAKE_GENERATOR_OPTION=() fi if hasArg --pydevelop; then - PYTHON_ARGS_FOR_INSTALL="${PYTHON_ARGS_FOR_INSTALL} -e" + PYTHON_ARGS_FOR_INSTALL+=(-e) fi SKBUILD_EXTRA_CMAKE_ARGS="${EXTRA_CMAKE_ARGS}" # Replace spaces with semicolons in SKBUILD_EXTRA_CMAKE_ARGS -SKBUILD_EXTRA_CMAKE_ARGS=$(echo ${SKBUILD_EXTRA_CMAKE_ARGS} | sed 's/ /;/g') +SKBUILD_EXTRA_CMAKE_ARGS=${SKBUILD_EXTRA_CMAKE_ARGS// /;} # If clean or uninstall targets given, run them prior to any other steps if hasArg uninstall; then if [[ "$INSTALL_PREFIX" != "" ]]; then - rm -rf ${INSTALL_PREFIX}/include/cugraph - rm -f ${INSTALL_PREFIX}/lib/libcugraph.so - rm -rf ${INSTALL_PREFIX}/include/cugraph_c - rm -f ${INSTALL_PREFIX}/lib/libcugraph_c.so - rm -rf ${INSTALL_PREFIX}/include/cugraph_etl - rm -f ${INSTALL_PREFIX}/lib/libcugraph_etl.so - rm -rf ${INSTALL_PREFIX}/lib/cmake/cugraph - rm -rf ${INSTALL_PREFIX}/lib/cmake/cugraph_etl + rm -rf "${INSTALL_PREFIX}/include/cugraph" + rm -f "${INSTALL_PREFIX}/lib/libcugraph.so" + rm -rf "${INSTALL_PREFIX}/include/cugraph_c" + rm -f "${INSTALL_PREFIX}/lib/libcugraph_c.so" + rm -rf "${INSTALL_PREFIX}/include/cugraph_etl" + rm -f "${INSTALL_PREFIX}/lib/libcugraph_etl.so" + rm -rf "${INSTALL_PREFIX}/lib/cmake/cugraph" + rm -rf "${INSTALL_PREFIX}/lib/cmake/cugraph_etl" fi # This may be redundant given the above, but can also be used in case # there are other installed files outside of the locations above. - if [ -e ${LIBCUGRAPH_BUILD_DIR}/install_manifest.txt ]; then - xargs rm -f < ${LIBCUGRAPH_BUILD_DIR}/install_manifest.txt > /dev/null 2>&1 + if [ -e "${LIBCUGRAPH_BUILD_DIR}/install_manifest.txt" ]; then + xargs rm -f < "${LIBCUGRAPH_BUILD_DIR}/install_manifest.txt" > /dev/null 2>&1 fi # uninstall cugraph and pylibcugraph installed from a prior install # FIXME: if multiple versions of these packages are installed, this only @@ -204,7 +202,7 @@ if hasArg clean; then set +e # remove artifacts generated inplace if [[ -d ${REPODIR}/python ]]; then - cleanPythonDir ${REPODIR}/python + cleanPythonDir "${REPODIR}/python" fi # If the dirs to clean are mounted dirs in a container, the contents should @@ -212,9 +210,9 @@ if hasArg clean; then # contents but leaves the dirs, the rmdir attempts to remove the dirs but # can fail safely. 
for bd in ${BUILD_DIRS}; do - if [ -d ${bd} ]; then - find ${bd} -mindepth 1 -delete - rmdir ${bd} || true + if [ -d "${bd}" ]; then + find "${bd}" -mindepth 1 -delete + rmdir "${bd}" || true fi done # Go back to failing on first error for all other operations @@ -225,128 +223,117 @@ fi # Configure, build, and install libcugraph if buildDefault || hasArg libcugraph || hasArg all; then if hasArg --clean; then - if [ -d ${LIBCUGRAPH_BUILD_DIR} ]; then - find ${LIBCUGRAPH_BUILD_DIR} -mindepth 1 -delete - rmdir ${LIBCUGRAPH_BUILD_DIR} || true + if [ -d "${LIBCUGRAPH_BUILD_DIR}" ]; then + find "${LIBCUGRAPH_BUILD_DIR}" -mindepth 1 -delete + rmdir "${LIBCUGRAPH_BUILD_DIR}" || true fi else - if (( ${BUILD_ALL_GPU_ARCH} == 0 )); then + if (( BUILD_ALL_GPU_ARCH == 0 )); then CUGRAPH_CMAKE_CUDA_ARCHITECTURES="NATIVE" echo "Building for the architecture of the GPU in the system..." else CUGRAPH_CMAKE_CUDA_ARCHITECTURES="RAPIDS" echo "Building for *ALL* supported GPU architectures..." fi - mkdir -p ${LIBCUGRAPH_BUILD_DIR} - cd ${LIBCUGRAPH_BUILD_DIR} + mkdir -p "${LIBCUGRAPH_BUILD_DIR}" + cd "${LIBCUGRAPH_BUILD_DIR}" cmake -B "${LIBCUGRAPH_BUILD_DIR}" -S "${REPODIR}/cpp" \ - -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \ + -DCMAKE_INSTALL_PREFIX="${INSTALL_PREFIX}" \ -DCMAKE_CUDA_ARCHITECTURES=${CUGRAPH_CMAKE_CUDA_ARCHITECTURES} \ -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \ -DBUILD_TESTS=${BUILD_CPP_TESTS} \ -DBUILD_CUGRAPH_MG_TESTS=${BUILD_CPP_MG_TESTS} \ - -DBUILD_CUGRAPH_MTMG_TESTS=${BUILD_CPP_MTMG_TESTS} \ - ${CMAKE_GENERATOR_OPTION} \ - ${CMAKE_VERBOSE_OPTION} - cmake --build "${LIBCUGRAPH_BUILD_DIR}" -j${PARALLEL_LEVEL} ${INSTALL_TARGET} ${VERBOSE_FLAG} + -DBUILD_CUGRAPH_MTMG_TESTS=${BUILD_CPP_MTMG_TESTS} \ + "${CMAKE_GENERATOR_OPTION[@]}" \ + "${CMAKE_VERBOSE_OPTION[@]}" + + cmake --build "${LIBCUGRAPH_BUILD_DIR}" "-j${PARALLEL_LEVEL}" "${INSTALL_TARGET[@]}" "${VERBOSE_FLAG}" fi fi # Configure, build, and install libcugraph_etl if buildDefault || hasArg libcugraph_etl || hasArg all; then if hasArg --clean; then - if [ -d ${LIBCUGRAPH_ETL_BUILD_DIR} ]; then - find ${LIBCUGRAPH_ETL_BUILD_DIR} -mindepth 1 -delete - rmdir ${LIBCUGRAPH_ETL_BUILD_DIR} || true + if [ -d "${LIBCUGRAPH_ETL_BUILD_DIR}" ]; then + find "${LIBCUGRAPH_ETL_BUILD_DIR}" -mindepth 1 -delete + rmdir "${LIBCUGRAPH_ETL_BUILD_DIR}" || true fi else - if (( ${BUILD_ALL_GPU_ARCH} == 0 )); then + if (( BUILD_ALL_GPU_ARCH == 0 )); then CUGRAPH_CMAKE_CUDA_ARCHITECTURES="NATIVE" echo "Building for the architecture of the GPU in the system..." else CUGRAPH_CMAKE_CUDA_ARCHITECTURES="RAPIDS" echo "Building for *ALL* supported GPU architectures..." 
fi - mkdir -p ${LIBCUGRAPH_ETL_BUILD_DIR} - cd ${LIBCUGRAPH_ETL_BUILD_DIR} - cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \ - -DCMAKE_CUDA_ARCHITECTURES=${CUGRAPH_CMAKE_CUDA_ARCHITECTURES} \ - -DDISABLE_DEPRECATION_WARNING=${BUILD_DISABLE_DEPRECATION_WARNING} \ + mkdir -p "${LIBCUGRAPH_ETL_BUILD_DIR}" + cd "${LIBCUGRAPH_ETL_BUILD_DIR}" + cmake -DCMAKE_INSTALL_PREFIX="${INSTALL_PREFIX}" \ + -DCMAKE_CUDA_ARCHITECTURES="${CUGRAPH_CMAKE_CUDA_ARCHITECTURES}" \ + -DDISABLE_DEPRECATION_WARNING="${BUILD_DISABLE_DEPRECATION_WARNING}" \ -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \ -DBUILD_TESTS=${BUILD_CPP_TESTS} \ -DBUILD_CUGRAPH_MG_TESTS=${BUILD_CPP_MG_TESTS} \ -DBUILD_CUGRAPH_MTMG_TESTS=${BUILD_CPP_MTMG_TESTS} \ - -DCMAKE_PREFIX_PATH=${LIBCUGRAPH_BUILD_DIR} \ - ${CMAKE_GENERATOR_OPTION} \ - ${CMAKE_VERBOSE_OPTION} \ - ${REPODIR}/cpp/libcugraph_etl - cmake --build "${LIBCUGRAPH_ETL_BUILD_DIR}" -j${PARALLEL_LEVEL} ${INSTALL_TARGET} ${VERBOSE_FLAG} + -DCMAKE_PREFIX_PATH="${LIBCUGRAPH_BUILD_DIR}" \ + "${CMAKE_GENERATOR_OPTION[@]}" \ + "${CMAKE_VERBOSE_OPTION[@]}" \ + "${REPODIR}/cpp/libcugraph_etl" + cmake --build "${LIBCUGRAPH_ETL_BUILD_DIR}" "-j${PARALLEL_LEVEL}" "${INSTALL_TARGET[@]}" "${VERBOSE_FLAG}" fi fi # Build, and install pylibcugraph if buildDefault || hasArg pylibcugraph || hasArg all; then if hasArg --clean; then - cleanPythonDir ${REPODIR}/python/pylibcugraph + cleanPythonDir "${REPODIR}/python/pylibcugraph" else SKBUILD_CMAKE_ARGS="${SKBUILD_EXTRA_CMAKE_ARGS}" \ - python ${PYTHON_ARGS_FOR_INSTALL} ${REPODIR}/python/pylibcugraph + python "${PYTHON_ARGS_FOR_INSTALL[@]}" "${REPODIR}/python/pylibcugraph" fi fi # Build and install the cugraph Python package if buildDefault || hasArg cugraph || hasArg all; then if hasArg --clean; then - cleanPythonDir ${REPODIR}/python/cugraph + cleanPythonDir "${REPODIR}/python/cugraph" else SKBUILD_CMAKE_ARGS="${SKBUILD_EXTRA_CMAKE_ARGS}" \ - python ${PYTHON_ARGS_FOR_INSTALL} ${REPODIR}/python/cugraph + python "${PYTHON_ARGS_FOR_INSTALL[@]}" "${REPODIR}/python/cugraph" fi fi # Install the cugraph-service-client and cugraph-service-server Python packages if hasArg cugraph-service || hasArg all; then if hasArg --clean; then - cleanPythonDir ${REPODIR}/python/cugraph-service + cleanPythonDir "${REPODIR}/python/cugraph-service" else - python ${PYTHON_ARGS_FOR_INSTALL} ${REPODIR}/python/cugraph-service/client - python ${PYTHON_ARGS_FOR_INSTALL} ${REPODIR}/python/cugraph-service/server + python "${PYTHON_ARGS_FOR_INSTALL[@]}" "${REPODIR}/python/cugraph-service/client" + python "${PYTHON_ARGS_FOR_INSTALL[@]}" "${REPODIR}/python/cugraph-service/server" fi fi # Build the docs if hasArg docs || hasArg all; then - if [ ! -d ${LIBCUGRAPH_BUILD_DIR} ]; then - mkdir -p ${LIBCUGRAPH_BUILD_DIR} - cd ${LIBCUGRAPH_BUILD_DIR} + if [ ! -d "${LIBCUGRAPH_BUILD_DIR}" ]; then + mkdir -p "${LIBCUGRAPH_BUILD_DIR}" + cd "${LIBCUGRAPH_BUILD_DIR}" cmake -B "${LIBCUGRAPH_BUILD_DIR}" -S "${REPODIR}/cpp" \ - -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \ - -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \ - ${CMAKE_GENERATOR_OPTION} \ - ${CMAKE_VERBOSE_OPTION} + -DCMAKE_INSTALL_PREFIX="${INSTALL_PREFIX}" \ + -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \ + "${CMAKE_GENERATOR_OPTION[@]}" \ + "${CMAKE_VERBOSE_OPTION[@]}" fi - # for PROJECT in libwholegraph; do - # XML_DIR="${REPODIR}/docs/cugraph/${PROJECT}" - # rm -rf "${XML_DIR}" - # mkdir -p "${XML_DIR}" - # export XML_DIR_${PROJECT^^}="$XML_DIR" - - # echo "downloading xml for ${PROJECT} into ${XML_DIR}. 
Environment variable XML_DIR_${PROJECT^^} is set to ${XML_DIR}" - # curl -O "https://d1664dvumjb44w.cloudfront.net/${PROJECT}/xml_tar/${RAPIDS_VERSION}/xml.tar.gz" - # tar -xzf xml.tar.gz -C "${XML_DIR}" - # rm "./xml.tar.gz" - # done - - cd ${LIBCUGRAPH_BUILD_DIR} - cmake --build "${LIBCUGRAPH_BUILD_DIR}" -j${PARALLEL_LEVEL} --target docs_cugraph ${VERBOSE_FLAG} + cd "${LIBCUGRAPH_BUILD_DIR}" + cmake --build "${LIBCUGRAPH_BUILD_DIR}" "-j${PARALLEL_LEVEL}" --target docs_cugraph ${VERBOSE_FLAG} echo "making libcugraph doc dir" - rm -rf ${REPODIR}/docs/cugraph/libcugraph - mkdir -p ${REPODIR}/docs/cugraph/libcugraph + rm -rf "${REPODIR}/docs/cugraph/libcugraph" + mkdir -p "${REPODIR}/docs/cugraph/libcugraph" export XML_DIR_LIBCUGRAPH="${REPODIR}/cpp/doxygen/xml" - cd ${REPODIR}/docs/cugraph + cd "${REPODIR}/docs/cugraph" make html fi diff --git a/ci/build_docs.sh b/ci/build_docs.sh index 3a0e936177b..8d27255e281 100755 --- a/ci/build_docs.sh +++ b/ci/build_docs.sh @@ -4,10 +4,8 @@ set -euo pipefail if [[ "${RAPIDS_CUDA_VERSION}" == "11.8.0" ]]; then - CONDA_CUDA_VERSION="11.8" DGL_CHANNEL="dglteam/label/th23_cu118" else - CONDA_CUDA_VERSION="12.1" DGL_CHANNEL="dglteam/label/th23_cu121" fi @@ -18,9 +16,12 @@ PYTHON_CHANNEL=$(rapids-download-conda-from-s3 python) rapids-logger "Create test conda environment" . /opt/conda/etc/profile.d/conda.sh -export RAPIDS_VERSION="$(rapids-version)" -export RAPIDS_VERSION_MAJOR_MINOR="$(rapids-version-major-minor)" -export RAPIDS_VERSION_NUMBER="$RAPIDS_VERSION_MAJOR_MINOR" +RAPIDS_VERSION="$(rapids-version)" +export RAPIDS_VERSION +RAPIDS_VERSION_MAJOR_MINOR="$(rapids-version-major-minor)" +export RAPIDS_VERSION_MAJOR_MINOR +RAPIDS_VERSION_NUMBER="$RAPIDS_VERSION_MAJOR_MINOR" +export RAPIDS_VERSION_NUMBER rapids-dependency-file-generator \ --output conda \ @@ -38,12 +39,14 @@ conda activate docs rapids-print-env -export RAPIDS_DOCS_DIR="$(mktemp -d)" +RAPIDS_DOCS_DIR="$(mktemp -d)" +export RAPIDS_DOCS_DIR rapids-logger "Build CPP docs" pushd cpp/doxygen doxygen Doxyfile -export XML_DIR_LIBCUGRAPH="$(pwd)/xml" +XML_DIR_LIBCUGRAPH="$(pwd)/xml" +export XML_DIR_LIBCUGRAPH mkdir -p "${RAPIDS_DOCS_DIR}/libcugraph/xml_tar" tar -czf "${RAPIDS_DOCS_DIR}/libcugraph/xml_tar"/xml.tar.gz -C xml . popd diff --git a/ci/build_wheel.sh b/ci/build_wheel.sh index 4f999f2cc40..ac1c4f0a36f 100755 --- a/ci/build_wheel.sh +++ b/ci/build_wheel.sh @@ -10,7 +10,7 @@ package_type=$3 source rapids-configure-sccache source rapids-date-string -RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})" +RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen "${RAPIDS_CUDA_VERSION}")" rapids-generate-version > ./VERSION diff --git a/ci/build_wheel_cugraph.sh b/ci/build_wheel_cugraph.sh index aa68d372d76..1a184543b32 100755 --- a/ci/build_wheel_cugraph.sh +++ b/ci/build_wheel_cugraph.sh @@ -5,7 +5,7 @@ set -euo pipefail package_dir="python/cugraph" -RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})" +RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen "${RAPIDS_CUDA_VERSION}")" # Download the libcugraph and pylibcugraph wheels built in the previous step and make them # available for pip to find. @@ -13,8 +13,8 @@ LIBCUGRAPH_WHEELHOUSE=$(RAPIDS_PY_WHEEL_NAME="libcugraph_${RAPIDS_PY_CUDA_SUFFIX PYLIBCUGRAPH_WHEELHOUSE=$(RAPIDS_PY_WHEEL_NAME="pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 python /tmp/pylibcugraph_dist) cat >> ./constraints.txt <> ./constraints.txt <. 
for next version -NEXT_MAJOR=$(echo $NEXT_FULL_TAG | awk '{split($0, a, "."); print a[1]}') -NEXT_MINOR=$(echo $NEXT_FULL_TAG | awk '{split($0, a, "."); print a[2]}') +NEXT_MAJOR=$(echo "$NEXT_FULL_TAG" | awk '{split($0, a, "."); print a[1]}') +NEXT_MINOR=$(echo "$NEXT_FULL_TAG" | awk '{split($0, a, "."); print a[2]}') NEXT_SHORT_TAG=${NEXT_MAJOR}.${NEXT_MINOR} -NEXT_UCXX_SHORT_TAG="$(curl -sL https://version.gpuci.io/rapids/${NEXT_SHORT_TAG})" +NEXT_UCXX_SHORT_TAG="$(curl -sL https://version.gpuci.io/rapids/"${NEXT_SHORT_TAG}")" echo "Preparing release $CURRENT_TAG => $NEXT_FULL_TAG" # Inplace sed replace; workaround for Linux and Mac function sed_runner() { - sed -i.bak ''"$1"'' $2 && rm -f ${2}.bak + sed -i.bak ''"$1"'' "$2" && rm -f "${2}".bak } # Centralized version file update diff --git a/ci/test.sh b/ci/test.sh index 8e19b6c8c18..0b644040a8f 100755 --- a/ci/test.sh +++ b/ci/test.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -20,8 +20,8 @@ trap "EXITCODE=1" ERR NUMARGS=$# ARGS=$* -THISDIR=$(cd $(dirname $0);pwd) -CUGRAPH_ROOT=$(cd ${THISDIR}/..;pwd) +THISDIR=$(cd "$(dirname "$0")";pwd) +CUGRAPH_ROOT=$(cd "${THISDIR}"/..;pwd) GTEST_ARGS="--gtest_output=xml:${CUGRAPH_ROOT}/test-results/" DOWNLOAD_MODE="" EXITCODE=0 @@ -30,7 +30,7 @@ export RAPIDS_DATASET_ROOT_DIR=${RAPIDS_DATASET_ROOT_DIR:-${CUGRAPH_ROOT}/datase # FIXME: consider using getopts for option parsing function hasArg { - (( ${NUMARGS} != 0 )) && (echo " ${ARGS} " | grep -q " $1 ") + (( NUMARGS != 0 )) && (echo " ${ARGS} " | grep -q " $1 ") } # Add options unique to running a "quick" subset of tests here: @@ -50,12 +50,12 @@ if hasArg "--skip-download"; then echo "Using datasets in ${RAPIDS_DATASET_ROOT_DIR}" else echo "Download datasets..." - cd ${RAPIDS_DATASET_ROOT_DIR} + cd "${RAPIDS_DATASET_ROOT_DIR}" bash ./get_test_data.sh ${DOWNLOAD_MODE} fi if [[ -z "$PROJECT_FLASH" || "$PROJECT_FLASH" == "0" ]]; then - cd ${CUGRAPH_ROOT}/cpp/build + cd "${CUGRAPH_ROOT}"/cpp/build fi # Do not abort the script on error from this point on. This allows all tests to @@ -66,16 +66,16 @@ set +e if hasArg "--run-cpp-tests"; then echo "C++ gtests for cuGraph (single-GPU only)..." for gt in "${CONDA_PREFIX}/bin/gtests/libcugraph/"*_TEST; do - test_name=$(basename $gt) + test_name=$(basename "$gt") echo "Running gtest $test_name" - ${gt} ${GTEST_FILTER} ${GTEST_ARGS} + ${gt} "${GTEST_FILTER}" "${GTEST_ARGS}" echo "Ran gtest $test_name : return code was: $?, test script exit code is now: $EXITCODE" done # FIXME: the C API tests do not generate XML, so CI systems will not show # them in the GUI. Failing C API tests will still fail CI though, and the # output will appear in logs. for ct in "${CONDA_PREFIX}/bin/gtests/libcugraph_c/"CAPI_*_TEST; do - test_name=$(basename $ct) + test_name=$(basename "$ct") echo "Running C API test $test_name" ${ct} echo "Ran C API test $test_name : return code was: $?, test script exit code is now: $EXITCODE" @@ -84,24 +84,24 @@ fi if hasArg "--run-python-tests"; then echo "Python pytest for pylibcugraph..." 
- cd ${CUGRAPH_ROOT}/python/pylibcugraph/pylibcugraph - pytest -sv --cache-clear --junitxml=${CUGRAPH_ROOT}/junit-pylibcugraph-pytests.xml --cov-config=.coveragerc --cov=pylibcugraph --cov-report=xml:${WORKSPACE}/python/pylibcugraph/pylibcugraph-coverage.xml --cov-report term --ignore=raft --benchmark-disable + cd "${CUGRAPH_ROOT}"/python/pylibcugraph/pylibcugraph + pytest -sv --cache-clear --junitxml="${CUGRAPH_ROOT}"/junit-pylibcugraph-pytests.xml --cov-config=.coveragerc --cov=pylibcugraph --cov-report=xml:"${WORKSPACE}"/python/pylibcugraph/pylibcugraph-coverage.xml --cov-report term --ignore=raft --benchmark-disable echo "Ran Python pytest for pylibcugraph : return code was: $?, test script exit code is now: $EXITCODE" echo "Python pytest for cuGraph (single-GPU only)..." conda list - cd ${CUGRAPH_ROOT}/python/cugraph/cugraph - pytest -sv -m sg --cache-clear --junitxml=${CUGRAPH_ROOT}/junit-cugraph-pytests.xml --cov-config=.coveragerc --cov=cugraph --cov-report=xml:${WORKSPACE}/python/cugraph/cugraph-coverage.xml --cov-report term --ignore=raft --benchmark-disable + cd "${CUGRAPH_ROOT}"/python/cugraph/cugraph + pytest -sv -m sg --cache-clear --junitxml="${CUGRAPH_ROOT}"/junit-cugraph-pytests.xml --cov-config=.coveragerc --cov=cugraph --cov-report=xml:"${WORKSPACE}"/python/cugraph/cugraph-coverage.xml --cov-report term --ignore=raft --benchmark-disable echo "Ran Python pytest for cugraph : return code was: $?, test script exit code is now: $EXITCODE" echo "Python benchmarks for cuGraph (running as tests)..." - cd ${CUGRAPH_ROOT}/benchmarks/cugraph + cd "${CUGRAPH_ROOT}"/benchmarks/cugraph pytest -sv -m sg -m "managedmem_on and poolallocator_on and tiny" --benchmark-disable echo "Ran Python benchmarks for cuGraph (running as tests) : return code was: $?, test script exit code is now: $EXITCODE" echo "Python pytest for cugraph-service (single-GPU only)..." - cd ${CUGRAPH_ROOT}/python/cugraph-service - pytest -sv --cache-clear --junitxml=${CUGRAPH_ROOT}/junit-cugraph-service-pytests.xml --benchmark-disable -k "not mg" ./tests + cd "${CUGRAPH_ROOT}"/python/cugraph-service + pytest -sv --cache-clear --junitxml="${CUGRAPH_ROOT}"/junit-cugraph-service-pytests.xml --benchmark-disable -k "not mg" ./tests echo "Ran Python pytest for cugraph-service : return code was: $?, test script exit code is now: $EXITCODE" fi diff --git a/ci/test_cpp.sh b/ci/test_cpp.sh index 609507d58cf..183a70c5a8c 100755 --- a/ci/test_cpp.sh +++ b/ci/test_cpp.sh @@ -34,7 +34,8 @@ rapids-logger "Check GPU usage" nvidia-smi # RAPIDS_DATASET_ROOT_DIR is used by test scripts -export RAPIDS_DATASET_ROOT_DIR="$(realpath datasets)" +RAPIDS_DATASET_ROOT_DIR="$(realpath datasets)" +export RAPIDS_DATASET_ROOT_DIR pushd "${RAPIDS_DATASET_ROOT_DIR}" ./get_test_data.sh --cpp_ci_subset popd @@ -49,4 +50,4 @@ rapids-logger "Run gtests" ./ci/run_ctests.sh -j10 && EXITCODE=$?
|| EXITCODE=$?; rapids-logger "Test script exiting with value: $EXITCODE" -exit ${EXITCODE} +exit "${EXITCODE}" diff --git a/ci/test_python.sh b/ci/test_python.sh index ba52513f827..53f1f253378 100755 --- a/ci/test_python.sh +++ b/ci/test_python.sh @@ -40,7 +40,8 @@ nvidia-smi export LD_PRELOAD="${CONDA_PREFIX}/lib/libgomp.so.1" # RAPIDS_DATASET_ROOT_DIR is used by test scripts -export RAPIDS_DATASET_ROOT_DIR="$(realpath datasets)" +RAPIDS_DATASET_ROOT_DIR="$(realpath datasets)" +export RAPIDS_DATASET_ROOT_DIR EXITCODE=0 trap "EXITCODE=1" ERR diff --git a/ci/test_wheel.sh b/ci/test_wheel.sh index c96e91b037c..d1b01d1866a 100755 --- a/ci/test_wheel.sh +++ b/ci/test_wheel.sh @@ -5,19 +5,19 @@ set -eoxu pipefail package_name=$1 -python_package_name=$(echo ${package_name}|sed 's/-/_/g') +python_package_name=${package_name//-/_} # Run smoke tests for aarch64 pull requests arch=$(uname -m) if [[ "${arch}" == "aarch64" && ${RAPIDS_BUILD_TYPE} == "pull-request" ]]; then - python ./ci/wheel_smoke_test_${package_name}.py + python ./ci/wheel_smoke_test_"${package_name}".py else # Test runs that include tests that use dask require # --import-mode=append. See test_python.sh for details. # FIXME: Adding PY_IGNORE_IMPORTMISMATCH=1 to workaround conftest.py import # mismatch error seen by nx-cugraph after using pytest 8 and # --import-mode=append. - RAPIDS_DATASET_ROOT_DIR=`pwd`/datasets \ + RAPIDS_DATASET_ROOT_DIR=$(pwd)/datasets \ PY_IGNORE_IMPORTMISMATCH=1 \ DASK_WORKER_DEVICES="0" \ DASK_DISTRIBUTED__SCHEDULER__WORKER_TTL="1000s" \ @@ -28,5 +28,5 @@ else --import-mode=append \ --benchmark-disable \ -k "not test_property_graph_mg and not test_bulk_sampler_io" \ - ./python/${package_name}/${python_package_name}/tests + "./python/${package_name}/${python_package_name}/tests" fi diff --git a/ci/test_wheel_cugraph.sh b/ci/test_wheel_cugraph.sh index 92114656157..1b1b2aafb04 100755 --- a/ci/test_wheel_cugraph.sh +++ b/ci/test_wheel_cugraph.sh @@ -5,7 +5,7 @@ set -eoxu pipefail # Download the packages built in the previous step mkdir -p ./dist -RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})" +RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen "${RAPIDS_CUDA_VERSION}")" RAPIDS_PY_WHEEL_NAME="cugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 python ./dist RAPIDS_PY_WHEEL_NAME="libcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 cpp ./local-libcugraph-dep RAPIDS_PY_WHEEL_NAME="pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 python ./local-pylibcugraph-dep diff --git a/ci/test_wheel_pylibcugraph.sh b/ci/test_wheel_pylibcugraph.sh index 8e4cccac07b..34e00dbd727 100755 --- a/ci/test_wheel_pylibcugraph.sh +++ b/ci/test_wheel_pylibcugraph.sh @@ -5,7 +5,7 @@ set -eoxu pipefail # Download the packages built in the previous step mkdir -p ./dist -RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})" +RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen "${RAPIDS_CUDA_VERSION}")" RAPIDS_PY_WHEEL_NAME="libcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 cpp ./local-libcugraph-dep RAPIDS_PY_WHEEL_NAME="pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 python ./dist diff --git a/ci/utils/nbtest.sh b/ci/utils/nbtest.sh index ae8b52df106..b90aa83a231 100755 --- a/ci/utils/nbtest.sh +++ b/ci/utils/nbtest.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) 2019-2021, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. 
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -48,22 +48,22 @@ get_ipython().run_cell_magic=my_run_cell_magic NO_COLORS=--colors=NoColor EXITCODE=0 NBTMPDIR=${WORKSPACE}/tmp -mkdir -p ${NBTMPDIR} +mkdir -p "${NBTMPDIR}" -for nb in $*; do - NBFILENAME=$1 +for nb in "$@"; do + NBFILENAME=$nb NBNAME=${NBFILENAME%.*} NBNAME=${NBNAME##*/} NBTESTSCRIPT=${NBTMPDIR}/${NBNAME}-test.py shift echo -------------------------------------------------------------------------------- - echo STARTING: ${NBNAME} + echo STARTING: "${NBNAME}" echo -------------------------------------------------------------------------------- - jupyter nbconvert --to script ${NBFILENAME} --output ${NBTMPDIR}/${NBNAME}-test - echo "${MAGIC_OVERRIDE_CODE}" > ${NBTMPDIR}/tmpfile - cat ${NBTESTSCRIPT} >> ${NBTMPDIR}/tmpfile - mv ${NBTMPDIR}/tmpfile ${NBTESTSCRIPT} + jupyter nbconvert --to script "${NBFILENAME}" --output "${NBTMPDIR}/${NBNAME}-test" + echo "${MAGIC_OVERRIDE_CODE}" > "${NBTMPDIR}/tmpfile" + cat "${NBTESTSCRIPT}" >> "${NBTMPDIR}/tmpfile" + mv "${NBTMPDIR}/tmpfile" "${NBTESTSCRIPT}" echo "Running \"ipython ${NO_COLORS} ${NBTESTSCRIPT}\" on $(date)" echo diff --git a/ci/validate_wheel.sh b/ci/validate_wheel.sh index c4bd01faabb..40bb27feeb3 100755 --- a/ci/validate_wheel.sh +++ b/ci/validate_wheel.sh @@ -12,10 +12,10 @@ rapids-logger "validate packages with 'pydistcheck'" pydistcheck \ --inspect \ - "$(echo ${wheel_dir_relative_path}/*.whl)" + "$(echo "${wheel_dir_relative_path}"/*.whl)" rapids-logger "validate packages with 'twine'" twine check \ --strict \ - "$(echo ${wheel_dir_relative_path}/*.whl)" + "$(echo "${wheel_dir_relative_path}"/*.whl)" diff --git a/cpp/examples/build.sh b/cpp/examples/build.sh index 37b0a8bb391..1c086e64cc8 100755 --- a/cpp/examples/build.sh +++ b/cpp/examples/build.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright (c) 2024, NVIDIA CORPORATION. +# Copyright (c) 2024-2025, NVIDIA CORPORATION. # script for building libcugraph examples @@ -45,12 +45,12 @@ HELP="$0 [ ...] [ ...] --help - print this text " -if (( ${NUMARGS} == 0 )); then +if (( NUMARGS == 0 )); then echo "${HELP}" fi # Check for valid usage -if (( ${NUMARGS} != 0 )); then +if (( NUMARGS != 0 )); then for a in ${ARGS}; do if ! (echo "${VALIDARGS}" | grep -q "^[[:blank:]]*${a}$"); then echo "Invalid option: ${a}" @@ -60,7 +60,7 @@ if (( ${NUMARGS} != 0 )); then fi function hasArg { - (( ${NUMARGS} != 0 )) && (echo " ${ARGS} " | grep -q " $1 ") + (( NUMARGS != 0 )) && (echo " ${ARGS} " | grep -q " $1 ") } if hasArg -h || hasArg --help; then @@ -76,13 +76,13 @@ fi if hasArg clean; then # Ignore errors for clean since missing files, etc. 
are not failures set +e - for idx in ${!EXAMPLES[@]} + for idx in "${!EXAMPLES[@]}" do current_example=${EXAMPLES[$idx]} build_dir="${EXAMPLES_ROOT_DIR}/${current_example}/build" - if [ -d ${build_dir} ]; then - find ${build_dir} -mindepth 1 -delete - rmdir ${build_dir} || true + if [ -d "${build_dir}" ]; then + find "${build_dir}" -mindepth 1 -delete + rmdir "${build_dir}" || true echo "Removed ${build_dir}" fi done @@ -97,15 +97,15 @@ build_example() { build_dir="${example_dir}/build" # Configure - cmake -S ${example_dir} -B ${build_dir} -Dcugraph_ROOT="${CUGRAPH_BUILD_DIR}" ${CMAKE_VERBOSE_OPTION} + cmake -S "${example_dir}" -B "${build_dir}" -Dcugraph_ROOT="${CUGRAPH_BUILD_DIR}" ${CMAKE_VERBOSE_OPTION} # Build - cmake --build ${build_dir} -j${PARALLEL_LEVEL} ${VERBOSE_FLAG} + cmake --build "${build_dir}" "-j${PARALLEL_LEVEL}" "${VERBOSE_FLAG}" } if hasArg all; then - for idx in ${!EXAMPLES[@]} + for idx in "${!EXAMPLES[@]}" do current_example=${EXAMPLES[$idx]} - build_example $current_example + build_example "$current_example" done fi diff --git a/datasets/get_test_data.sh b/datasets/get_test_data.sh index 6778166ab6e..0c8f8394b52 100755 --- a/datasets/get_test_data.sh +++ b/datasets/get_test_data.sh @@ -1,4 +1,5 @@ -# Copyright (c) 2021-2024, NVIDIA CORPORATION. +#!/bin/bash +# Copyright (c) 2021-2025, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -#!/bin/bash set -e set -o pipefail @@ -82,7 +82,7 @@ self_loops NUMARGS=$# ARGS=$* function hasArg { - (( ${NUMARGS} != 0 )) && (echo " ${ARGS} " | grep -q " $1 ") + (( NUMARGS != 0 )) && (echo " ${ARGS} " | grep -q " $1 ") } if hasArg -h || hasArg --help; then @@ -104,7 +104,9 @@ else DATASET_DATA="${BASE_DATASET_DATA} ${EXTENDED_DATASET_DATA}" fi +# shellcheck disable=SC2207 URLS=($(echo "$DATASET_DATA"|awk '{if (NR%4 == 3) print $0}')) # extract 3rd fields to a bash array +# shellcheck disable=SC2207 DESTDIRS=($(echo "$DATASET_DATA"|awk '{if (NR%4 == 0) print $0}')) # extract 4th fields to a bash array echo Downloading ... @@ -112,8 +114,8 @@ echo Downloading ... # Download all tarfiles to a tmp dir mkdir -p tmp cd tmp -for url in ${URLS[*]}; do - time wget -N --progress=dot:giga ${url} +for url in "${URLS[@]}"; do + time wget -N --progress=dot:giga "${url}" done cd .. @@ -123,6 +125,7 @@ mkdir -p "${DESTDIRS[@]}" # Iterate over the arrays and untar the nth tarfile to the nth dest directory. # The tarfile name is derived from the download url. echo Decompressing ... +# shellcheck disable=SC2016 for index in ${!DESTDIRS[*]}; do echo "tmp/$(basename "${URLS[$index]}") -C ${DESTDIRS[$index]}" | tr '\n' '\0' -done | xargs -0 -t -r -n1 -P$(nproc --all) sh -c 'tar -xzvf $0 --overwrite' +done | xargs -0 -t -r -n1 -P"$(nproc --all)" sh -c 'tar -xzvf $0 --overwrite' diff --git a/mg_utils/default-config.sh b/mg_utils/default-config.sh index 26cef2aee78..67cb85b429e 100755 --- a/mg_utils/default-config.sh +++ b/mg_utils/default-config.sh @@ -1,4 +1,5 @@ -# Copyright (c) 2022-2023, NVIDIA CORPORATION. +#!/bin/bash +# Copyright (c) 2022-2025, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -11,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -THIS_DIR=$(cd $(dirname ${BASH_SOURCE[0]}) && pwd) +THIS_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # Most are defined using the bash := or :- syntax, which means they # will be set only if they were previously unset. The project config @@ -20,7 +21,6 @@ THIS_DIR=$(cd $(dirname ${BASH_SOURCE[0]}) && pwd) # file that should not be overridded by a project, then they will # simply not use that syntax and override, since these variables are # read last. -SCRIPTS_DIR=$THIS_DIR WORKSPACE=$THIS_DIR # These really should be oerridden by the project config! @@ -36,4 +36,4 @@ DASK_HOST_MEMORY_LIMIT=${DASK_HOST_MEMORY_LIMIT:-auto} BUILD_LOG_FILE=${BUILD_LOG_FILE:-${RESULTS_DIR}/build_log.txt} SCHEDULER_FILE=${SCHEDULER_FILE:-${WORKSPACE}/dask-scheduler.json} DATE=${DATE:-$(date --utc "+%Y-%m-%d_%H:%M:%S")_UTC} -ENV_EXPORT_FILE=${ENV_EXPORT_FILE:-${WORKSPACE}/$(basename ${CONDA_ENV})-${DATE}.txt} +ENV_EXPORT_FILE=${ENV_EXPORT_FILE:-${WORKSPACE}/$(basename "${CONDA_ENV}")-${DATE}.txt} diff --git a/mg_utils/functions.sh b/mg_utils/functions.sh index 7eedb5f1b1f..d685d0947cf 100644 --- a/mg_utils/functions.sh +++ b/mg_utils/functions.sh @@ -1,4 +1,5 @@ -# Copyright (c) 2022-2023, NVIDIA CORPORATION. +#!/bin/bash +# Copyright (c) 2022-2025, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -19,11 +20,11 @@ NUMARGS=$# ARGS=$* function hasArg { - (( ${NUMARGS} != 0 )) && (echo " ${ARGS} " | grep -q " $1 ") + (( NUMARGS != 0 )) && (echo " ${ARGS} " | grep -q " $1 ") } function logger { - echo -e ">>>> $@" + echo -e ">>>> $*" } # Calling "setTee outfile" will cause all stdout and stderr of the @@ -40,7 +41,7 @@ function setTee { teeFile=$1 # Create a named pipe. pipeName=$(mktemp -u) - mkfifo $pipeName + mkfifo "$pipeName" # Close the currnet 1 and 2 and restore to original (3, 4) in the # event this function is called repeatedly. exec 1>&- 2>&- @@ -49,9 +50,9 @@ function setTee { # and stderr to the named pipe which goes to the tee process. The # named pipe "file" can be removed and the tee process stays alive # until the fd is closed. - tee -a < $pipeName $teeFile & - exec > $pipeName 2>&1 - rm $pipeName + tee -a < "$pipeName" "$teeFile" & + exec > "$pipeName" 2>&1 + rm "$pipeName" } # Call this to stop script output from going to "tee" after a prior diff --git a/mg_utils/run-dask-process.sh b/mg_utils/run-dask-process.sh index b88abb685ec..634a2470bd5 100755 --- a/mg_utils/run-dask-process.sh +++ b/mg_utils/run-dask-process.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) 2022-2023, NVIDIA CORPORATION. +# Copyright (c) 2022-2025, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -THIS_DIR=$(cd $(dirname ${BASH_SOURCE[0]}) && pwd) +THIS_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -source ${THIS_DIR}/default-config.sh -source ${THIS_DIR}/functions.sh +source "${THIS_DIR}/default-config.sh" +source "${THIS_DIR}/functions.sh" # Logs can be written to a specific location by setting the LOGS_DIR # env var. 
@@ -25,7 +25,7 @@ LOGS_DIR=${LOGS_DIR:-dask_logs-$$} NUMARGS=$# ARGS=$* function hasArg { - (( ${NUMARGS} != 0 )) && (echo " ${ARGS} " | grep -q " $1 ") + (( NUMARGS != 0 )) && (echo " ${ARGS} " | grep -q " $1 ") } VALIDARGS="-h --help scheduler workers --tcp --ucx --ucxib --ucx-ib" HELP="$0 [ ...] [ ...] @@ -49,7 +49,7 @@ CLUSTER_CONFIG_TYPE=${CLUSTER_CONFIG_TYPE:-TCP} START_SCHEDULER=0 START_WORKERS=0 -if (( ${NUMARGS} == 0 )); then +if (( NUMARGS == 0 )); then echo "${HELP}" exit 0 else @@ -187,17 +187,17 @@ worker_pid="" num_scheduler_tries=0 function startScheduler { - mkdir -p $(dirname $SCHEDULER_FILE) - echo "RUNNING: \"python -m distributed.cli.dask_scheduler $SCHEDULER_ARGS\"" > $SCHEDULER_LOG - dask-scheduler $SCHEDULER_ARGS >> $SCHEDULER_LOG 2>&1 & + mkdir -p "$(dirname "$SCHEDULER_FILE")" + echo "RUNNING: \"python -m distributed.cli.dask_scheduler $SCHEDULER_ARGS\"" > "$SCHEDULER_LOG" + dask-scheduler $SCHEDULER_ARGS >> "$SCHEDULER_LOG" 2>&1 & scheduler_pid=$! } -mkdir -p $LOGS_DIR +mkdir -p "$LOGS_DIR" logger "Logs written to: $LOGS_DIR" if [[ $START_SCHEDULER == 1 ]]; then - rm -f $SCHEDULER_FILE $SCHEDULER_LOG $WORKERS_LOG + rm -f "$SCHEDULER_FILE" "$SCHEDULER_LOG" "$WORKERS_LOG" startScheduler sleep 6 @@ -213,7 +213,7 @@ if [[ $START_SCHEDULER == 1 ]]; then echo "scheduler failed to start, retry #$num_scheduler_tries" startScheduler sleep 6 - num_scheduler_tries=$(echo $num_scheduler_tries+1 | bc) + num_scheduler_tries=$(echo "$num_scheduler_tries"+1 | bc) else echo "could not start scheduler, exiting." exit 1 @@ -224,13 +224,13 @@ if [[ $START_SCHEDULER == 1 ]]; then fi if [[ $START_WORKERS == 1 ]]; then - rm -f $WORKERS_LOG + rm -f "$WORKERS_LOG" while [ ! -f "$SCHEDULER_FILE" ]; do echo "run-dask-process.sh: $SCHEDULER_FILE not present - waiting to start workers..." sleep 2 done - echo "RUNNING: \"python -m dask_cuda.cli.dask_cuda_worker $WORKER_ARGS\"" > $WORKERS_LOG - dask-cuda-worker $WORKER_ARGS >> $WORKERS_LOG 2>&1 & + echo "RUNNING: \"python -m dask_cuda.cli.dask_cuda_worker $WORKER_ARGS\"" > "$WORKERS_LOG" + dask-cuda-worker $WORKER_ARGS >> "$WORKERS_LOG" 2>&1 & worker_pid=$! echo "worker(s) started." fi diff --git a/notebooks/cugraph_benchmarks/dataPrep.sh b/notebooks/cugraph_benchmarks/dataPrep.sh index b59130a98df..b78a8b7717a 100755 --- a/notebooks/cugraph_benchmarks/dataPrep.sh +++ b/notebooks/cugraph_benchmarks/dataPrep.sh @@ -1,18 +1,19 @@ #!/bin/bash +# Copyright (c) 2025, NVIDIA CORPORATION. if [ ! -d "./data" ] then mkdir ./data fi -cd data +cd data || exit 1 if [ ! -f "./preferentialAttachment.mtx" ] then if [ ! -d "./tmp" ] then mkdir tmp - cd tmp + cd tmp || exit 1 wget -N https://sparse.tamu.edu/MM/DIMACS10/preferentialAttachment.tar.gz wget -N https://sparse.tamu.edu/MM/DIMACS10/caidaRouterLevel.tar.gz diff --git a/python/cugraph-service/tests/multi_client_test_runner.sh b/python/cugraph-service/tests/multi_client_test_runner.sh index 70433de0970..775d27725c2 100644 --- a/python/cugraph-service/tests/multi_client_test_runner.sh +++ b/python/cugraph-service/tests/multi_client_test_runner.sh @@ -1,4 +1,5 @@ -# Copyright (c) 2019-2022, NVIDIA CORPORATION. +#!/bin/bash +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at diff --git a/scripts/dask/run-dask-process.sh b/scripts/dask/run-dask-process.sh index 9eef17fc5e5..806372a4e15 100755 --- a/scripts/dask/run-dask-process.sh +++ b/scripts/dask/run-dask-process.sh @@ -60,19 +60,17 @@ DASK_CLUSTER_CONFIG_TYPE=${DASK_CLUSTER_CONFIG_TYPE:-TCP} ################################################################################ # FUNCTIONS -numargs=$# -args=$* hasArg () { - (( ${numargs} != 0 )) && (echo " ${args} " | grep -q " $1 ") + (( NUMARGS != 0 )) && (echo " ${ARGS} " | grep -q " $1 ") } logger_prefix=">>>> " logger () { if (( $# > 0 )) && [ "$1" == "-p" ]; then shift - echo -e "${logger_prefix}$@" + echo -e "${logger_prefix}$*" else - echo -e "$(date --utc "+%D-%T.%N")_UTC${logger_prefix}$@" + echo -e "$(date --utc "+%D-%T.%N")_UTC${logger_prefix}$*" fi } @@ -153,9 +151,9 @@ worker_pid="" num_scheduler_tries=0 startScheduler () { - mkdir -p $(dirname $SCHEDULER_FILE) - echo "RUNNING: \"dask scheduler $SCHEDULER_ARGS\"" > $DASK_SCHEDULER_LOG - dask scheduler $SCHEDULER_ARGS >> $DASK_SCHEDULER_LOG 2>&1 & + mkdir -p "$(dirname "$SCHEDULER_FILE")" + echo "RUNNING: \"dask scheduler $SCHEDULER_ARGS\"" > "$DASK_SCHEDULER_LOG" + dask scheduler $SCHEDULER_ARGS >> "$DASK_SCHEDULER_LOG" 2>&1 & scheduler_pid=$! } @@ -166,7 +164,7 @@ startScheduler () { START_SCHEDULER=0 START_WORKERS=0 -if (( ${NUMARGS} == 0 )); then +if (( NUMARGS == 0 )); then echo "${HELP}" exit 0 else @@ -220,11 +218,11 @@ else buildTcpArgs fi -mkdir -p $DASK_LOGS_DIR +mkdir -p "$DASK_LOGS_DIR" logger "Logs written to: $DASK_LOGS_DIR" if [[ $START_SCHEDULER == 1 ]]; then - rm -f $SCHEDULER_FILE $DASK_SCHEDULER_LOG $DASK_WORKERS_LOG + rm -f "$SCHEDULER_FILE" "$DASK_SCHEDULER_LOG" "$DASK_WORKERS_LOG" startScheduler sleep 6 @@ -251,13 +249,13 @@ if [[ $START_SCHEDULER == 1 ]]; then fi if [[ $START_WORKERS == 1 ]]; then - rm -f $DASK_WORKERS_LOG + rm -f "$DASK_WORKERS_LOG" while [ ! -f "$SCHEDULER_FILE" ]; do logger "run-dask-process.sh: $SCHEDULER_FILE not present - waiting to start workers..." sleep 2 done - echo "RUNNING: \"dask_cuda_worker $WORKER_ARGS\"" > $DASK_WORKERS_LOG - dask-cuda-worker $WORKER_ARGS >> $DASK_WORKERS_LOG 2>&1 & + echo "RUNNING: \"dask_cuda_worker $WORKER_ARGS\"" > "$DASK_WORKERS_LOG" + dask-cuda-worker $WORKER_ARGS >> "$DASK_WORKERS_LOG" 2>&1 & worker_pid=$! logger "worker(s) started." fi