diff --git a/Dockerfile-build b/Dockerfile-build index c0281dc15..a5d08fdeb 100644 --- a/Dockerfile-build +++ b/Dockerfile-build @@ -21,64 +21,57 @@ ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH_CUDA:$LD_LIBRARY_PATH_BUILD:$LD_LIBRARY_PAT ENV OMP_NUM_THREADS=32 ENV MKL_NUM_THREADS=32 ENV VECLIB_MAXIMUM_THREADS=32 -ENV GIT_VERSION=2.17.0 +ENV AXEL_VERSION=2.17.11 + +# Disable the error "Unable to read consumer identity" +RUN sed -i 's/enabled=1/enabled=0/' /etc/yum/pluginconf.d/subscription-manager.conf # # Install necessary libraries and dependencies # -RUN yum install -y epel-release +RUN dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + +RUN dnf update -y -RUN yum update -y +# Enable RHSCL repository +# RUN dnf install -y 'dnf-command(config-manager)' && \ +# dnf config-manager --set-enabled powertools -RUN yum install centos-release-scl-rh -y # Setup gcc etc. -RUN yum install -y gcc gcc-c++ libgcc libstdc++ libgomp glibc +RUN dnf install -y gcc gcc-c++ libgcc libstdc++ libgomp glibc # Git & gcc requirements -RUN yum install -y libcurl-devel zlib-devel asciidoc xmlto wget make autoconf gettext gmp-devel mpfr-devel libmpc-devel +RUN dnf install -y libcurl-devel zlib-devel wget make autoconf gettext gmp-devel mpfr-devel libmpc-devel -RUN yum install -y devtoolset-7 -# Compile from source because yum's latest version is 1.8.3 -# --depth for submodule update which we use was added in 1.8.4 -RUN \ - wget https://www.kernel.org/pub/software/scm/git/git-${GIT_VERSION}.tar.xz && \ - tar xf git-${GIT_VERSION}.tar.xz && \ - cd git-${GIT_VERSION} && \ - make configure && \ - ./configure --prefix=/usr && \ - make all && \ - make install; +# since we are using devel version, dev tools should already be installed +# RUN dnf install -y devtoolset-7 + +# Install git +RUN dnf install -y git #H2O4GPU requirements -RUN yum install -y \ +RUN dnf install -y \ + cmake \ ncurses-devel \ bzip2 \ which \ - axel \ openssl-devel \ libpng-devel \ freetype-devel \ 
- blas-devel \ - epel-release \ zeromq-devel \ - openblas-devel \ + openblas \ libffi-devel +# Compile from source because ubi8 does not have axel RUN \ - git clone https://github.com/NVIDIA/nccl.git && \ - cd nccl && \ - git checkout tags/v2.4.7-1 && \ - scl enable devtoolset-7 'make CUDA8_GENCODE="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_61,code=sm_61" -j src.build'; - -# cmake 3.17.1 looks incompatible with CUDA -RUN wget https://github.com/Kitware/CMake/releases/download/v3.16.8/cmake-3.16.8.tar.gz && \ - tar -zxvf cmake-3.16.8.tar.gz && \ - cd cmake-3.16.8 && \ - ./bootstrap && \ - make -j`nproc` && \ - make install + wget https://github.com/axel-download-accelerator/axel/releases/download/v${AXEL_VERSION}/axel-${AXEL_VERSION}.tar.gz && \ + tar xf axel-${AXEL_VERSION}.tar.gz && \ + cd axel-${AXEL_VERSION} && \ + ./configure --prefix=/usr && \ + make && \ + make install; RUN mkdir -p /opt/h2oai/h2o4gpu @@ -96,12 +89,12 @@ ENV LLVM_CONFIG=$LLVM4/bin/llvm-config # Library versions # ARG python_version -ENV MINICONDA_VERSION=4.8.3 +ENV MINICONDA_VERSION=23.3.1-0 ENV SWIG_VERSION=3.0.12 # conda -RUN wget https://repo.anaconda.com/miniconda/Miniconda3-py38_${MINICONDA_VERSION}-Linux-`arch`.sh && \ - bash Miniconda3-py38_${MINICONDA_VERSION}-Linux-`arch`.sh -b -p /opt/h2oai/h2o4gpu/python && \ +RUN wget https://repo.anaconda.com/miniconda/Miniconda3-py310_${MINICONDA_VERSION}-Linux-`arch`.sh && \ + bash Miniconda3-py310_${MINICONDA_VERSION}-Linux-`arch`.sh -b -p /opt/h2oai/h2o4gpu/python && \ /opt/h2oai/h2o4gpu/python/bin/conda install -y python=${python_version} conda-build pip && \ /opt/h2oai/h2o4gpu/python/bin/conda update conda-build @@ -135,7 +128,7 @@ RUN \ # Builds from source due to too old versions in yum # WORKDIR $HOME - +RUN dnf install -y pcre-devel # SWIG RUN \ wget https://0xdata-public.s3.amazonaws.com/swig/swig-${SWIG_VERSION}.tar.gz && \ @@ -158,7 +151,7 @@ RUN 
bash -c 'if [ `arch` = "ppc64le" ]; then \ cd $HOME/arrow/cpp && \ git checkout tags/apache-arrow-0.17.1 && \ yum install -y boost-devel && \ - CFLAGS=-std=c99 pip install numpy==1.19.2 cython==0.29.14 scipy==1.5.2 && \ + CFLAGS=-std=c99 pip install numpy==1.22.0 cython==3.0.8 scipy==1.10.0 && \ cmake -DARROW_CXXFLAGS="-lutil" -DARROW_PYTHON=on && make -j && make install && \ cd $HOME/arrow/python && \ ARROW_HOME=/usr/local python setup.py install && \ @@ -171,9 +164,11 @@ RUN bash -c 'if [ `arch` = "ppc64le" ]; then \ # RUN echo ${python_version} -RUN pip install -U pip==20.1.1 +RUN pip install -U pip==23.0.1 -RUN pip install numpy==1.19.2 scipy==1.5.2 setuptools==49.2.0 +RUN pip install Cython==3.0.8 + +RUN pip install numpy==1.22.0 scipy==1.10.0 setuptools==49.6.0 COPY src/interface_py/requirements_buildonly.txt requirements_buildonly.txt @@ -187,13 +182,15 @@ RUN (localedef -v -c -i en_US -f UTF-8 en_US.UTF-8 || true) ENV LANG en_US.UTF-8 ENV LANGUAGE en_US:en +# RUN dnf install -y --enablerepo=epel boost169-devel + +RUN conda install -y conda-forge::icu conda-forge::python-devtools conda-forge::cuda-opencl-dev # See https://github.com/Microsoft/LightGBM/wiki/Installation-Guide#with-gpu-support for details # https://github.com/Microsoft/LightGBM/pull/929/files # Could compile with these as well: -DBOOST_COMPUTE_USE_OFFLINE_CACHE=OFF -DBOOST_COMPUTE_THREAD_SAFE=ON RUN \ export CUDA_HOME=/usr/local/cuda/ && \ - yum install -y opencl-headers icu libicu-devel bzip2 bzip2-devel zlib-devel python-devel && \ wget https://s3.amazonaws.com/0xdata-public/boost/boost_1_72_0.tar.bz2 && \ tar xjf boost_1_72_0.tar.bz2 && \ cd boost_1_72_0 && \ @@ -220,6 +217,8 @@ RUN bash -c 'if [ `arch` == "ppc64le" ]; then \ ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib64/nvidia ENV CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda +RUN git config --global --add safe.directory /root/repo + WORKDIR $HOME ENV GIT_AUTHOR_NAME="anonymous" diff --git a/Dockerfile-runtime-multi-gpu 
b/Dockerfile-runtime-multi-gpu index 4ade7f471..bd7ad6855 100644 --- a/Dockerfile-runtime-multi-gpu +++ b/Dockerfile-runtime-multi-gpu @@ -12,7 +12,7 @@ ENV PATH=/usr/local/cuda/bin:$PATH ENV CUDADIR=/usr/local/cuda/include/ ENV LD_LIBRARY_PATH=/usr/lib64:/usr/local/lib:$LD_LIBRARY_PATH -ENV MINICONDA_VERSION=4.8.3 +ENV MINICONDA_VERSION=23.3.1-0 # Setup gcc etc. RUN yum install -y epel-release @@ -53,8 +53,8 @@ ENV LLVM_CONFIG=$LLVM4/bin/llvm-config ARG python_version # ln -sf /usr/bin/python36 /usr/bin/python breaks yum -RUN if [ "$use_miniconda" = "1" ] ; then wget https://repo.anaconda.com/miniconda/Miniconda3-py38_${MINICONDA_VERSION}-Linux-`arch`.sh && \ - bash Miniconda3-py38_${MINICONDA_VERSION}-Linux-`arch`.sh -b -p /opt/h2oai/h2o4gpu/python && \ +RUN if [ "$use_miniconda" = "1" ] ; then wget https://repo.anaconda.com/miniconda/Miniconda3-py310_${MINICONDA_VERSION}-Linux-`arch`.sh && \ + bash Miniconda3-py310_${MINICONDA_VERSION}-Linux-`arch`.sh -b -p /opt/h2oai/h2o4gpu/python && \ /opt/h2oai/h2o4gpu/python/bin/conda install -y python=${python_version} pip; else \ yum install -y python36 python36-devel python36-pip && \ ln -sf /usr/bin/python36 /usr/bin/python && ln -s /usr/bin/pip3 /usr/bin/pip; \ diff --git a/Dockerfile-runtime-single-gpu b/Dockerfile-runtime-single-gpu index b60485cae..10d3dcb88 100644 --- a/Dockerfile-runtime-single-gpu +++ b/Dockerfile-runtime-single-gpu @@ -12,7 +12,7 @@ ENV PATH=/usr/local/cuda/bin:$PATH ENV CUDADIR=/usr/local/cuda/include/ ENV LD_LIBRARY_PATH=/usr/lib64:/usr/local/lib:$LD_LIBRARY_PATH -ENV MINICONDA_VERSION=4.8.3 +ENV MINICONDA_VERSION=23.3.1-0 # Setup gcc etc. 
RUN yum install -y epel-release @@ -42,8 +42,8 @@ RUN mkdir -p /opt/h2oai/h2o4gpu ARG python_version # ln -sf /usr/bin/python36 /usr/bin/python breaks yum -RUN if [ "$use_miniconda" = "1" ] ; then wget https://repo.anaconda.com/miniconda/Miniconda3-py38_${MINICONDA_VERSION}-Linux-`arch`.sh && \ - bash Miniconda3-py38_${MINICONDA_VERSION}-Linux-`arch`.sh -b -p /opt/h2oai/h2o4gpu/python && \ +RUN if [ "$use_miniconda" = "1" ] ; then wget https://repo.anaconda.com/miniconda/Miniconda3-py310_${MINICONDA_VERSION}-Linux-`arch`.sh && \ + bash Miniconda3-py310_${MINICONDA_VERSION}-Linux-`arch`.sh -b -p /opt/h2oai/h2o4gpu/python && \ /opt/h2oai/h2o4gpu/python/bin/conda install -y python=${python_version} pip; else \ yum install -y python36 python36-devel python36-pip && \ ln -sf /usr/bin/python36 /usr/bin/python && ln -s /usr/bin/pip3 /usr/bin/pip; \ diff --git a/Makefile b/Makefile index 03cfd4055..b194192c8 100644 --- a/Makefile +++ b/Makefile @@ -328,7 +328,7 @@ docker-build: export CONTAINER_NAME="local-make-build-cuda$(DOCKER_CUDA_VERSION)" ;\ export versionTag=$(BASE_VERSION) ;\ export extratag="-cuda$(DOCKER_CUDA_VERSION)" ;\ - export dockerimage="nvidia/cuda${DOCKER_ARCH}:$(DOCKER_CUDA_VERSION)-cudnn$(DOCKER_CUDNN_VERSION)-devel-centos7" ;\ + export dockerimage="nvidia/cuda:11.2.2-devel-ubi8" ;\ bash scripts/make-docker-devel.sh docker-runtime: @@ -337,7 +337,7 @@ docker-runtime: export versionTag=$(BASE_VERSION) ;\ export extratag="-cuda$(DOCKER_CUDA_VERSION)" ;\ export fullVersionTag=$(BASE_VERSION) ;\ - export dockerimage="nvidia/cuda${DOCKER_ARCH}:$(DOCKER_CUDA_VERSION)-cudnn$(DOCKER_CUDNN_VERSION)-runtime-centos7" ;\ + export dockerimage="nvidia/cuda:11.2.2-devel-ubi8" ;\ bash scripts/make-docker-runtime.sh docker-runtime-run: @@ -349,7 +349,7 @@ docker-runtests: @echo "+-- Run tests in docker (-nccl-cuda9) --+" export CONTAINER_NAME="localmake-runtests" ;\ export extratag="-cuda$(DOCKER_CUDA_VERSION)" ;\ - export 
dockerimage="nvidia/cuda${DOCKER_ARCH}:$(DOCKER_CUDA_VERSION)-cudnn$(DOCKER_CUDNN_VERSION)-devel-centos7" ;\ + export dockerimage="nvidia/cuda:11.2.2-devel-ubi8" ;\ export target="dotest" ;\ bash scripts/make-docker-runtests.sh diff --git a/ci/Jenkinsfile-x86_64-cuda10-py310 b/ci/Jenkinsfile-x86_64-cuda10-py310 new file mode 100644 index 000000000..afaec589b --- /dev/null +++ b/ci/Jenkinsfile-x86_64-cuda10-py310 @@ -0,0 +1,327 @@ +#!/usr/bin/groovy + +//################ FILE IS AUTO-GENERATED from .base files +//################ DO NOT MODIFY +//################ See scripts/make_jenkinsfiles.sh + +// TOOD: rename to @Library('h2o-jenkins-pipeline-lib') _ +@Library('test-shared-library') _ + +import ai.h2o.ci.Utils +import static ai.h2o.ci.Utils.banner +def utilsLib = new Utils() +import ai.h2o.ci.BuildInfo + +def commitMessage = '' +def h2o4gpuUtils = null + +def platform = "x86_64-ubi8-cuda11.2.2" +def BUILDTYPE = "cuda10-py310" +def cuda = "nvidia/cuda:11.2.2-devel-ubi8" +def cudart = "nvidia/cuda:11.2.2-devel-ubi8" +def extratag = "-cuda11" +def linuxwheel = "x86_64-centos7-cuda10.whl" +def testtype = "dotest-single-gpu" +def testtype_multi_gpu = "dotest-multi-gpu" +def labelbuild = "nvidia-docker" +def labeltest = "gpu && nvidia-docker" +def labeltest_multi_gpu = "2gpu && nvidia-docker" +def labelruntime = "nvidia-docker" +def doingbenchmark = "0" +def dobenchmark = "0" +def doruntime = "1" +def python = "3.10" +def data_dirs = "-v /home/0xdiag/h2o4gpu/data:/data -v /home/0xdiag/h2o4gpu/open_data:/open_data" +def publish_docs = true//################ BELOW IS COPY/PASTE of ci/Jenkinsfile.template (except stage names) +def benchmark_commit_trigger + +pipeline { + agent none + + // Setup job options + options { + ansiColor('xterm') + timestamps() + timeout(time: 300, unit: 'MINUTES') + buildDiscarder(logRotator(numToKeepStr: '10')) + disableConcurrentBuilds() + skipDefaultCheckout() + } + + environment { + MAKE_OPTS = "-s CI=1" // -s: silent mode + BUILD_TYPE = 
"${BUILDTYPE}" + } + + stages { + stage("Git clone on Linux x86_64-cuda10-py310") { + + agent { + label "${labelbuild}" + } + steps { + dumpInfo 'Linux Build Info' + // Do checkout + retryWithTimeout(200 /* seconds */, 3 /* retries */) { + deleteDir() + checkout([ + $class : 'GitSCM', + branches : scm.branches, + doGenerateSubmoduleConfigurations: false, + extensions : scm.extensions + [[$class: 'SubmoduleOption', disableSubmodules: true, recursiveSubmodules: false, reference: '', trackingSubmodules: false, shallow: true]], + submoduleCfg : [], + userRemoteConfigs : scm.userRemoteConfigs]) + } + script { + h2o4gpuUtils = load "ci/Jenkinsfile.utils" + buildInfo("h2o4gpu", h2o4gpuUtils.isRelease()) + commitMessage = sh(script: 'git log -1 --pretty=%B | tr "\n" " "', returnStdout: true).trim() + echo "Commit Message: ${commitMessage}" + benchmark_commit_trigger = ("${commitMessage}" ==~ /.*trigger_benchmark.*/) + echo "benchmark_commit_trigger: ${benchmark_commit_trigger}" + } + stash includes: "ci/Jenkinsfile*", name: "jenkinsfiles" + } + } + stage("Build on Centos7 x86_64-cuda10-py310") { + agent { + label "${labelbuild}" + } + when { + expression { + unstash "jenkinsfiles" + h2o4gpuUtils = load "ci/Jenkinsfile.utils" + return "${doingbenchmark}" == "1" || h2o4gpuUtils.doBuild() || h2o4gpuUtils.doTests() || !h2o4gpuUtils.wasStageSuccessful("Build on Centos7 x86_64-cuda10-py310") + } + } + steps { + // Do checkout + retryWithTimeout(200 /* seconds */, 3 /* retries */) { + deleteDir() + checkout([ + $class : 'GitSCM', + branches : scm.branches, + doGenerateSubmoduleConfigurations: false, + extensions : scm.extensions + [[$class: 'SubmoduleOption', disableSubmodules: true, recursiveSubmodules: false, reference: '', trackingSubmodules: false, shallow: true]], + submoduleCfg : [], + userRemoteConfigs : scm.userRemoteConfigs]) + } + script { + h2o4gpuUtils = load "ci/Jenkinsfile.utils" + h2o4gpuUtils.buildOnLinux("${cuda}", "${python}", "${extratag}", "${platform}", 
"${linuxwheel}") + + buildInfo("h2o4gpu", h2o4gpuUtils.isRelease()) + + script { + // Load the version file content + buildInfo.get().setVersion(utilsLib.getCommandOutput("cat build/VERSION.txt")) + utilsLib.setCurrentBuildName(buildInfo.get().getVersion()) + utilsLib.appendBuildDescription("""|Authors: ${buildInfo.get().getAuthorNames().join(" ")} + |Git SHA: ${buildInfo.get().getGitSha().substring(0, 8)} + |""".stripMargin("|")) + } + + } + } + } + + stage("Test - Multi GPU x86_64-cuda10-py310") { + agent { + label "${labeltest_multi_gpu}" + } + when { + expression { + unstash "jenkinsfiles" + h2o4gpuUtils = load "ci/Jenkinsfile.utils" + return "${doingbenchmark}" == "1" || h2o4gpuUtils.doTests() && (h2o4gpuUtils.rerun_disabled(commitMessage) || !h2o4gpuUtils.wasStageSuccessful("Test | Lint | S3up on Centos7 x86_64-cuda10-py310")) + } + } + steps { + dumpInfo 'Linux Test Info' + // Get source code (should put tests into wheel, then wouldn't have to checkout) + retryWithTimeout(200 /* seconds */, 3 /* retries */) { + deleteDir() + checkout([ + $class : 'GitSCM', + branches : scm.branches, + doGenerateSubmoduleConfigurations: false, + extensions : scm.extensions + [[$class: 'SubmoduleOption', disableSubmodules: true, recursiveSubmodules: false, reference: '', trackingSubmodules: false, shallow: true]], + submoduleCfg : [], + userRemoteConfigs : scm.userRemoteConfigs]) + } + script { + unstash 'version_info' + sh """ + echo "Before Stashed wheel file:" + ls -l src/interface_py/dist/${platform}/ || true + rm -rf src/interface_py/dist/${platform}/ || true + """ + unstash "${linuxwheel}" + sh """ + echo "After Stashed wheel file:" + ls -l src/interface_py/dist/${platform}/ || true + """ + h2o4gpuUtils.runTestsMultiGpu(buildInfo.get(), "${cuda}", "${python}", "${extratag}", "${platform}", "${testtype_multi_gpu}", "${data_dirs}") + } + } + } + + stage("Test - Single GPU | Lint | S3up on Centos7 x86_64-cuda10-py310") { + agent { + label "${labeltest}" + } + when { + 
expression { + unstash "jenkinsfiles" + h2o4gpuUtils = load "ci/Jenkinsfile.utils" + return "${doingbenchmark}" == "1" || h2o4gpuUtils.doTests() && (h2o4gpuUtils.rerun_disabled(commitMessage) || !h2o4gpuUtils.wasStageSuccessful("Test | Lint | S3up on Centos7 x86_64-cuda10-py310")) + } + } + steps { + dumpInfo 'Linux Test Info' + // Get source code (should put tests into wheel, then wouldn't have to checkout) + retryWithTimeout(200 /* seconds */, 3 /* retries */) { + deleteDir() + checkout([ + $class : 'GitSCM', + branches : scm.branches, + doGenerateSubmoduleConfigurations: false, + extensions : scm.extensions + [[$class: 'SubmoduleOption', disableSubmodules: true, recursiveSubmodules: false, reference: '', trackingSubmodules: false, shallow: true]], + submoduleCfg : [], + userRemoteConfigs : scm.userRemoteConfigs]) + } + script { + unstash 'version_info' + sh """ + echo "Before Stashed wheel file:" + ls -l src/interface_py/dist/${platform}/ || true + rm -rf src/interface_py/dist/${platform}/ || true + """ + unstash "${linuxwheel}" + sh """ + echo "After Stashed wheel file:" + ls -l src/interface_py/dist/${platform}/ || true + """ + unstash "py_docs" + sh """ + echo "After Stashed py documentation file:" + ls -l src/interface_py/docs/_build || true + """ + h2o4gpuUtils.runTestsSingleGpu(buildInfo.get(), "${cuda}", "${python}", "${extratag}", "${platform}", "${testtype}", "${data_dirs}") + } + retryWithTimeout(500 /* seconds */, 5 /* retries */) { + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: "awsArtifactsUploader"]]) { + script { + h2o4gpuUtils.publishToS3(buildInfo.get(), extratag ,platform, publish_docs) + } + } + } + } + } + stage("Build/Publish Runtime Docker Centos7 x86_64-cuda10-py310") { + agent { + label "${labelruntime}" + } + when { + expression { + unstash "jenkinsfiles" + h2o4gpuUtils = load "ci/Jenkinsfile.utils" + return "${doruntime}" == "1" && h2o4gpuUtils.doRuntime() + } + } + steps { + dumpInfo 'Linux Build 
Info' + // Do checkout + retryWithTimeout(200 /* seconds */, 3 /* retries */) { + deleteDir() + checkout([ + $class : 'GitSCM', + branches : scm.branches, + doGenerateSubmoduleConfigurations: false, + extensions : scm.extensions + [[$class: 'SubmoduleOption', disableSubmodules: true, recursiveSubmodules: false, reference: '', trackingSubmodules: false, shallow: true]], + submoduleCfg : [], + userRemoteConfigs : scm.userRemoteConfigs]) + } + script { + sh """ + echo "Before Stashed wheel file:" + ls -l src/interface_py/dist/${platform}/ || true + rm -rf src/interface_py/dist/${platform}/ || true + """ + unstash "${linuxwheel}" + sh """ + echo "After Stashed wheel file:" + ls -l src/interface_py/dist/${platform} || true + """ + unstash 'version_info' + sh 'echo "Stashed version file:" && ls -l build/' + sh """ + echo "Before unstash condapkg:" + ls -l condapkgs || true + rm -rf condapkgs || true + """ + unstash "condapkg" + sh """ + echo "After unstash condapkg:" + ls -l condapkgs || true + """ + } + script { + h2o4gpuUtils.buildRuntime(buildInfo.get(), "${cudart}", "${python}", "${platform}", "${extratag}", "${data_dirs}") + } + retryWithTimeout(1000 /* seconds */, 5 /* retries */) { + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: "awsArtifactsUploader"]]) { + script { + h2o4gpuUtils.publishRuntimeToS3(buildInfo.get(), "${extratag}") + } + } + } + } + } + + stage("Benchmarking Linux x86_64-cuda10-py310") { + agent { + label 'master' + } + when { + expression { + unstash "jenkinsfiles" + h2o4gpuUtils = load "ci/Jenkinsfile.utils" + echo "benchmark_commit_trigger: ${benchmark_commit_trigger}" + return "${doingbenchmark}" == "1" || (("${benchmark_commit_trigger}"=="true" || h2o4gpuUtils.doTriggerBenchmarksJob()) && "${dobenchmark}" == "1" && env.BRANCH_NAME == "master") + } + } + steps { + script { + utilsLib.appendBuildDescription("BENCH \u2713") + } + + echo banner("Triggering downstream jobs h2o4gpu${extratag}-benchmark : 
RUNTIME_ID=${buildInfo.get().getVersion()}") + build job: "/h2o4gpu${extratag}-benchmark/${env.BRANCH_NAME}", parameters: [[$class: 'StringParameterValue', name: 'RUNTIME_ID', value: buildInfo.get().getVersion()]], propagate: false, wait: false, quietPeriod: 60 + } + } + + } // end over stages + post { + failure { + node('linux') { + script { + if(env.BRANCH_NAME == "master") { + emailext( + to: "mateusz@h2o.ai, jmckinney@h2o.ai", + subject: "BUILD FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'", + body: '''${JELLY_SCRIPT, template="html_gmail"}''', + attachLog: true, + compressLog: true, + recipientProviders: [ + [$class: 'DevelopersRecipientProvider'], + ] + ) + } + } + } + } + } +} + + diff --git a/src/interface_py/requirements_buildonly.txt b/src/interface_py/requirements_buildonly.txt index ad08864da..ae6318895 100644 --- a/src/interface_py/requirements_buildonly.txt +++ b/src/interface_py/requirements_buildonly.txt @@ -1,21 +1,21 @@ -numpydoc==0.8.0 -sphinx==1.8.5 -sphinx_rtd_theme==0.4.3 -pillow==7.2.0 -wheel==0.33.4 -cython==0.29.14 -pandas==1.1.3 -numpy==1.19.2 -scipy==1.5.2 -numba==0.46.0 -psutil==5.6.6 -llvmlite==0.30.0 -future==0.16.0 -tabulate==0.8.2 -joblib==0.14.0 +numpydoc==1.1.0 +sphinx==7.2.6 +sphinx_rtd_theme==2.0.0 +pillow==9.0.1 +wheel==0.42.0 +cython==3.0.8 +pandas==1.2.5 +numpy==1.22.0 +scipy==1.10.0 +numba==0.58.1 +psutil==5.9.7 +llvmlite==0.41.1 +future==0.18.0 +tabulate==0.9.0 +joblib==1.1.1 # the same version as scikit-learn fork -scikit-learn==0.23.2 -dask-cuda==0.12.0 -dask[dataframe]==2.11.0 -distributed==2.11.0 -msgpack==0.6.2 \ No newline at end of file +scikit-learn==1.3.2 +dask-cuda==21.8.0 +dask[dataframe]==2021.7.1 +distributed==2021.7.1 +msgpack==1.0.7