Commit dcb55da

update versions to 25.08 (#972)

- update all package version references to 25.08
- update rapids dependencies to released 25.08
- update spark-rapids from 25.04 to 25.06

Signed-off-by: Erik Ordentlich <[email protected]>

1 parent 4663782

File tree: 19 files changed, +32 / −34 lines


ci/Dockerfile (1 addition, 1 deletion)

```diff
@@ -48,5 +48,5 @@ RUN wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86
 
 # install cuML
 ARG CUML_VER=25.08
-RUN conda install -y -c rapidsai-nightly -c conda-forge -c nvidia cuml=$CUML_VER cuvs=$CUML_VER python=3.10 cuda-version=12.0 numpy~=1.0 \
+RUN conda install -y -c rapidsai -c conda-forge -c nvidia cuml=$CUML_VER cuvs=$CUML_VER python=3.10 cuda-version=12.0 numpy~=1.0 \
     && conda clean --all -f -y
```
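
The channel swap from `rapidsai-nightly` to `rapidsai` reflects that 25.08 is now a released build rather than a nightly. A minimal post-build sanity check, assuming the image's conda environment is the one the packages were installed into (these commands are illustrative, not part of the commit):

```shell
# Both packages should resolve from the rapidsai channel at 25.08.
conda list | grep -E '^(cuml|cuvs)'
python -c "import cuml; print(cuml.__version__)"   # expect 25.08.*
```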

docker/Dockerfile.pip (4 additions, 4 deletions)

```diff
@@ -18,7 +18,7 @@ ARG CUDA_VERSION=12.0.1
 FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04
 
 ARG PYSPARK_VERSION=3.3.1
-ARG RAPIDS_VERSION=25.6.0
+ARG RAPIDS_VERSION=25.8.0
 ARG ARCH=amd64
 #ARG ARCH=arm64
 
@@ -47,9 +47,9 @@ RUN apt-get update -y \
 # install RAPIDS
 # using ~= pulls in micro version patches
 RUN pip install --no-cache-dir \
-    cudf-cu11~=${RAPIDS_VERSION} \
-    cuml-cu11~=${RAPIDS_VERSION} \
-    cuvs-cu11~=${RAPIDS_VERSION} \
+    cudf-cu12~=${RAPIDS_VERSION} \
+    cuml-cu12~=${RAPIDS_VERSION} \
+    cuvs-cu12~=${RAPIDS_VERSION} \
     numpy~=1.0 \
     --extra-index-url=https://pypi.nvidia.com
 
```
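
Besides the version bump, the wheel suffix moves from `-cu11` to `-cu12`, matching the CUDA 12 base image, and `~=${RAPIDS_VERSION}` lets pip pick up 25.8.x micro patches. A hypothetical build invocation (the image tag is illustrative):

```shell
docker build -f docker/Dockerfile.pip \
  --build-arg RAPIDS_VERSION=25.8.0 \
  --build-arg ARCH=amd64 \
  -t spark-rapids-ml:25.08-pip .
```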

docker/Dockerfile.python (1 addition, 1 deletion)

```diff
@@ -17,7 +17,7 @@
 ARG CUDA_VERSION=12.0.1
 FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04
 
-ARG CUML_VERSION=25.06
+ARG CUML_VERSION=25.08
 
 # ubuntu22
 RUN sed -i -e 's|http://archive.ubuntu.com/ubuntu|https://archive.ubuntu.com/ubuntu|g' \
```
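
Note the two version spellings in play across these Dockerfiles, a convention the comments in notebooks/databricks/init-pip-cuda-12.0.sh below spell out: conda packages use the zero-padded two-part form, pip wheels the unpadded three-part form.

```shell
# conda (ci/Dockerfile, Dockerfile.python):  cuml=25.08
# pip   (Dockerfile.pip):                    cuml-cu12~=25.8.0
```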

docs/source/conf.py (1 addition, 1 deletion)

```diff
@@ -23,7 +23,7 @@
 project = 'spark-rapids-ml'
 copyright = '2025, NVIDIA'
 author = 'NVIDIA'
-release = '25.06.0'
+release = '25.08.0'
 
 # -- General configuration ---------------------------------------------------
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
```
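
To see the new `release` string in rendered output, a minimal rebuild sketch (assuming Sphinx is installed and the `docs/source` layout shown here; the build directory is illustrative):

```shell
sphinx-build -b html docs/source docs/build/html
```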

jvm/README.md (3 additions, 3 deletions)

````diff
@@ -50,10 +50,10 @@ including setting up the server and running client-side tests.
 To start the Spark Connect server with Spark Rapids ML support, follow these steps:
 
 ```shell
-conda activate rapids-25.06 # from spark-rapids-ml installation
+conda activate rapids-25.08 # from spark-rapids-ml installation
 export SPARK_HOME=<directory where spark was installed above>
 export PYSPARK_PYTHON=$(which python)
-export PLUGIN_JAR=$(pip show spark-rapids-ml | grep Location: | cut -d ' ' -f 2 )/spark_rapids_ml/jars/com.nvidia.rapids.ml-25.06.0.jar
+export PLUGIN_JAR=$(pip show spark-rapids-ml | grep Location: | cut -d ' ' -f 2 )/spark_rapids_ml/jars/com.nvidia.rapids.ml-25.08.0.jar
 $SPARK_HOME/sbin/start-connect-server.sh --master local[*] \
   --jars $PLUGIN_JAR \
   --conf spark.driver.memory=20G
@@ -107,7 +107,7 @@ mvn clean package -DskipTests
 if you would like to compile the plugin and run the unit tests, install `spark-rapids-ml` python package and its dependencies per the above instructions and run the following command:
 
 ``` shell
-conda activate rapids-25.06
+conda activate rapids-25.08
 export PYSPARK_PYTHON=$(which python)
 mvn clean package
 ```
````
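
Since `PLUGIN_JAR` hard-codes the jar version, a quick way to confirm the name matches the installed wheel, derived from the same `pip show` lookup used above (illustrative only):

```shell
ls "$(pip show spark-rapids-ml | grep Location: | cut -d ' ' -f 2)/spark_rapids_ml/jars/"
# expect: com.nvidia.rapids.ml-25.08.0.jar
```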

jvm/pom.xml (1 addition, 1 deletion)

```diff
@@ -21,7 +21,7 @@
 
     <groupId>com.nvidia.rapids</groupId>
     <artifactId>ml</artifactId>
-    <version>25.06.0</version>
+    <version>25.08.0</version>
     <packaging>jar</packaging>
 
     <properties>
```
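
A quick way to confirm the new artifact version resolves, using the standard maven-help-plugin (`-DforceStdout` requires plugin version 3.1.0 or later):

```shell
cd jvm && mvn -q help:evaluate -Dexpression=project.version -DforceStdout
# prints: 25.08.0
```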

notebooks/aws-emr/init-bootstrap-action.sh (1 addition, 1 deletion)

```diff
@@ -27,7 +27,7 @@ sudo bash -c "wget https://www.python.org/ftp/python/3.10.9/Python-3.10.9.tgz &&
 tar xzf Python-3.10.9.tgz && cd Python-3.10.9 && \
 ./configure --enable-optimizations && make altinstall"
 
-RAPIDS_VERSION=25.6.0
+RAPIDS_VERSION=25.8.0
 
 sudo /usr/local/bin/pip3.10 install --upgrade pip
 
```
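
`RAPIDS_VERSION` is presumably consumed by pip install lines further down the bootstrap script, outside this hunk. A hedged sketch of what that usage likely looks like (package names assumed from the pip Dockerfile above, not shown in this diff):

```shell
sudo /usr/local/bin/pip3.10 install \
  "cudf-cu12~=${RAPIDS_VERSION}" \
  "cuml-cu12~=${RAPIDS_VERSION}"
```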

notebooks/databricks/README.md (1 addition, 1 deletion)

```diff
@@ -27,7 +27,7 @@ If you already have a Databricks account, you can run the example notebooks on a
 spark.task.resource.gpu.amount 0.125
 spark.databricks.delta.preview.enabled true
 spark.python.worker.reuse true
-spark.executorEnv.PYTHONPATH /databricks/jars/rapids-4-spark_2.12-25.04.0.jar:/databricks/spark/python
+spark.executorEnv.PYTHONPATH /databricks/jars/rapids-4-spark_2.12-25.06.0.jar:/databricks/spark/python
 spark.sql.execution.arrow.maxRecordsPerBatch 100000
 spark.plugins com.nvidia.spark.SQLPlugin
 spark.locality.wait 0s
```
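
Note that this `PYTHONPATH` entry must name the exact jar that `init-pip-cuda-12.0.sh` (next file) downloads into `/databricks/jars/`, which is why both files move to 25.06.0 in the same commit.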

notebooks/databricks/init-pip-cuda-12.0.sh (2 additions, 2 deletions)

```diff
@@ -18,8 +18,8 @@ set -ex
 # IMPORTANT: specify RAPIDS_VERSION fully 23.10.0 and not 23.10
 # also in general, RAPIDS_VERSION (python) fields should omit any leading 0 in month/minor field (i.e. 23.8.0 and not 23.08.0)
 # while SPARK_RAPIDS_VERSION (jar) should have leading 0 in month/minor (e.g. 23.08.2 and not 23.8.2)
-RAPIDS_VERSION=25.6.0
-SPARK_RAPIDS_VERSION=25.04.0
+RAPIDS_VERSION=25.8.0
+SPARK_RAPIDS_VERSION=25.06.0
 
 curl -L https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/${SPARK_RAPIDS_VERSION}/rapids-4-spark_2.12-${SPARK_RAPIDS_VERSION}-cuda12.jar -o /databricks/jars/rapids-4-spark_2.12-${SPARK_RAPIDS_VERSION}.jar
 
```
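
For reference, with the updated value the curl URL above expands as follows (derived mechanically from the line shown, not new information):

```shell
SPARK_RAPIDS_VERSION=25.06.0
echo "https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/${SPARK_RAPIDS_VERSION}/rapids-4-spark_2.12-${SPARK_RAPIDS_VERSION}-cuda12.jar"
# https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/25.06.0/rapids-4-spark_2.12-25.06.0-cuda12.jar
```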

notebooks/dataproc/README.md (1 addition, 1 deletion)

````diff
@@ -31,7 +31,7 @@ If you already have a Dataproc account, you can run the example notebooks on a D
 If you wish to enable [no-import-change](../README.md#no-import-change) UX for the cluster, change the `spark-rapids-ml-no-import-enabled` metadata value to `1` in the command. The initialization script `spark_rapids_ml.sh` checks this metadata value and modifies the run time accordingly.
 
 ```
-export RAPIDS_VERSION=25.6.0
+export RAPIDS_VERSION=25.8.0
 
 gcloud dataproc clusters create $USER-spark-rapids-ml \
   --image-version=2.2-ubuntu22 \
````
