Commit 663e7f6

ax3l and kngott authored
Perlmutter (NERSC): Update Modules & Jobscript (#3493)
* Perlmutter (NERSC): Update Modules
  Update after major system upgrade.
* Perlmutter (NERSC): Update Jobscript
* Perlmutter (NERSC): Remove Early Access Warning
* Perlmutter (NERSC): Process Affinity

Co-authored-by: Kevin Gott <[email protected]>
1 parent 5645f4b commit 663e7f6

3 files changed, +14 -18 lines changed

Docs/source/install/hpc/perlmutter.rst

Lines changed: 0 additions & 5 deletions
@@ -3,11 +3,6 @@
 Perlmutter (NERSC)
 ==================
 
-.. warning::
-
-   Perlmutter is still in acceptance testing and environments change often.
-   Please reach visit this page often for updates and reach out to us if something needs an update.
-
 The `Perlmutter cluster <https://docs.nersc.gov/systems/perlmutter/>`_ is located at NERSC.
 
 
Lines changed: 11 additions & 5 deletions
@@ -1,6 +1,6 @@
 #!/bin/bash -l
 
-# Copyright 2021 Axel Huebl, Kevin Gott
+# Copyright 2021-2022 Axel Huebl, Kevin Gott
 #
 # This file is part of WarpX.
 #
@@ -13,22 +13,28 @@
 #SBATCH -A <proj>
 #SBATCH -q regular
 #SBATCH -C gpu
-#SBATCH -c 32
+#SBATCH --exclusive
 #SBATCH --ntasks-per-gpu=1
 #SBATCH --gpus-per-node=4
 #SBATCH -o WarpX.o%j
 #SBATCH -e WarpX.e%j
 
 # GPU-aware MPI
 export MPICH_GPU_SUPPORT_ENABLED=1
+export MPICH_OFI_NIC_POLICY=GPU
 
-# expose one GPU per MPI rank
-export CUDA_VISIBLE_DEVICES=$SLURM_LOCALID
+# threads for OpenMP and threaded compressors per MPI rank
+export SRUN_CPUS_PER_TASK=32
 
 EXE=./warpx
 #EXE=../WarpX/build/bin/warpx.3d.MPI.CUDA.DP.OPMD.QED
 #EXE=./main3d.gnu.TPROF.MPI.CUDA.ex
 INPUTS=inputs_small
 
-srun ${EXE} ${INPUTS} \
+# CUDA visible devices are ordered inverse to local task IDs
+srun /bin/bash -l -c " \
+    export CUDA_VISIBLE_DEVICES=\$((3-SLURM_LOCALID));
+    ${EXE} ${INPUTS} \
+      amrex.the_arena_is_managed=0 \
+      amrex.use_gpu_aware_mpi=1" \
   > output.txt
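Note on the new launch line: each MPI rank now runs in its own shell and exports CUDA_VISIBLE_DEVICES there, so every rank sees exactly one GPU, with the device index running opposite to SLURM_LOCALID (local rank 0 gets GPU 3) to match the node's GPU/NIC ordering. SRUN_CPUS_PER_TASK stands in for the removed `#SBATCH -c 32`, likely because newer Slurm releases no longer forward the batch script's -c setting to srun automatically. A minimal sketch to check the mapping from inside a one-node GPU allocation; the echo command is illustrative only and not part of this commit:

# sketch (assumed: an interactive allocation on one 4-GPU Perlmutter node)
# print which GPU each local rank would pick with the inverse mapping
srun --ntasks-per-gpu=1 --gpus-per-node=4 /bin/bash -c \
    'echo "local rank ${SLURM_LOCALID} -> GPU $((3-SLURM_LOCALID))"'

The single quotes matter: they keep the arithmetic from being expanded by the submitting shell, so each task evaluates its own SLURM_LOCALID, just as the escaped \$ does in the jobscript above.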

Tools/machines/perlmutter-nersc/perlmutter_warpx.profile.example

Lines changed: 3 additions & 8 deletions
@@ -1,19 +1,14 @@
 # please set your project account
-#export proj=<yourProject>  # LBNL/AMP: m3906_g
+#export proj="<yourProject>_g"  # change me
 
 # required dependencies
 module load cmake/3.22.0
-module load PrgEnv-gnu
-module load cudatoolkit
-
-# optional: just an additional text editor
-# module load nano  # TODO: request from support
 
 # optional: for QED support with detailed tables
 module load boost/1.78.0-gnu
 
 # optional: for openPMD and PSATD+RZ support
-module load cray-hdf5-parallel/1.12.1.1
+module load cray-hdf5-parallel/1.12.1.5
 export CMAKE_PREFIX_PATH=$HOME/sw/perlmutter/c-blosc-1.21.1:$CMAKE_PREFIX_PATH
 export CMAKE_PREFIX_PATH=$HOME/sw/perlmutter/adios2-2.7.1:$CMAKE_PREFIX_PATH
 export CMAKE_PREFIX_PATH=$HOME/sw/perlmutter/blaspp-master:$CMAKE_PREFIX_PATH
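The pinned versions above (cray-hdf5-parallel/1.12.1.5 here, cray-python/3.9.12.1 below) reflect the software state after the system upgrade; if a load fails on a later system image, listing what is currently installed is the usual first step. These are standard module commands, shown only as a hedged usage hint and not part of this commit:

module avail cray-hdf5-parallel   # list the HDF5 module versions currently provided
module avail cray-python          # list the cray-python versions currently provided
module load cray-hdf5-parallel    # alternatively, load the system default version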
@@ -25,7 +20,7 @@ export LD_LIBRARY_PATH=$HOME/sw/perlmutter/blaspp-master/lib64:$LD_LIBRARY_PATH
 export LD_LIBRARY_PATH=$HOME/sw/perlmutter/lapackpp-master/lib64:$LD_LIBRARY_PATH
 
 # optional: for Python bindings or libEnsemble
-module load cray-python/3.9.7.1
+module load cray-python/3.9.12.1
 
 if [ -d "$HOME/sw/perlmutter/venvs/warpx" ]
 then
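For context, the CMAKE_PREFIX_PATH and LD_LIBRARY_PATH exports in this profile make the user-built c-blosc, ADIOS2, BLAS++, and LAPACK++ installations visible to CMake at configure time and to the loader at run time. A hedged sketch of how such a profile is typically consumed when building WarpX; the profile location and the exact CMake options are assumptions based on the WarpX documentation, not part of this commit:

# sketch: source the profile, then configure and build WarpX for Perlmutter GPUs
source $HOME/perlmutter_warpx.profile                         # assumed copy location of this example profile
cmake -S . -B build_pm -DWarpX_COMPUTE=CUDA -DWarpX_DIMS=3    # run from a WarpX source checkout
cmake --build build_pm -j 16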
