Commit 96e8c61

Add stackinator build and paraview client config and pvserver launch scripts
These scripts will need to be further customized for use with stackinator-based installations of ParaView.
1 parent e1ea191 commit 96e8c61
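
The customization mentioned above is mostly confined to the site-specific variables at the top of each build script. A minimal sketch of the values another user would typically change before submitting (the username and paths below are hypothetical placeholders, not part of this commit):

# hypothetical example of the site-specific values to edit in each build script
SRC=/users/$USER/src                      # where the alps-vcluster repositories are checked out
CLUSTER=clariden                          # must match a directory in alps-cluster-config
RECIPE_DIR=$SRC/alps-vcluster/alps-spack-stacks/recipes/paraview/a100
SYSTEM_DIR=$SRC/alps-vcluster/alps-cluster-config/$CLUSTER
BUILD_DIR=/dev/shm/$USER                  # in-memory build area on the compute node
# plus the #SBATCH --account, --partition, --output and --error lines in the job header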

5 files changed, +397 -0 lines changed
Lines changed: 42 additions & 0 deletions
@@ -0,0 +1,42 @@
#!/bin/bash

#SBATCH --job-name=stackinator-paraview
#SBATCH --time=04:00:00
#SBATCH --nodes=1
#SBATCH --partition=nvgpu
#SBATCH --account=csstaff
#SBATCH --output=/users/biddisco/stackinator-output.txt
#SBATCH --error=/users/biddisco/stackinator-error.txt

SRC=/users/biddisco/src
CLUSTER=clariden
STACKI_DIR=$SRC/alps-vcluster/stackinator
RECIPE_DIR=$SRC/alps-vcluster/alps-spack-stacks/recipes/paraview/a100
SYSTEM_DIR=$SRC/alps-vcluster/alps-cluster-config/$CLUSTER
BUILD_DIR=/dev/shm/biddisco

echo "Setup/clean build dir"
#rm -rf ${BUILD_DIR}/*
mkdir -p ${BUILD_DIR}
mkdir -p ${BUILD_DIR}/tmp

echo "Execute stackinator"
$STACKI_DIR/bin/stack-config -s $SYSTEM_DIR -b ${BUILD_DIR} -r $RECIPE_DIR -c $RECIPE_DIR/cache-config.yaml --debug

# if using the develop branch of spack, add --develop
#$STACKI_DIR/bin/stack-config -s $SYSTEM_DIR -b ${BUILD_DIR} -r $RECIPE_DIR -c $RECIPE_DIR/cache-config.yaml --debug --develop

# build the squashfs image - bubblewrap is used inside the makefile
echo "Trigger build"
cd ${BUILD_DIR}
env --ignore-environment PATH=/usr/bin:/bin:$(pwd)/spack/bin make store.squashfs -j32

echo "Copy generated squashfs file"
DATE=$(date +%F)
cp ${BUILD_DIR}/store.squashfs $SCRATCH/$CLUSTER-paraview-$DATE.squashfs

# -----------------------------------------
# debug : create a shell using the spack setup used to create the squashfs
# -----------------------------------------
# $BUILD_DIR/bwrap-mutable-root.sh --tmpfs ~ --bind $BUILD_DIR/tmp /tmp --bind $BUILD_DIR/store /user-environment env --ignore-environment PATH=/usr/bin:/bin:$(pwd)/spack/bin SPACK_SYSTEM_CONFIG_PATH=/user-environment/config /bin/bash --norc --noprofile
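
A possible way to run the build script above, assuming it has been saved as build-paraview-clariden.sh (the filename is hypothetical and not part of this commit):

# submit the build job and follow its progress via the paths set in the #SBATCH header
sbatch build-paraview-clariden.sh
tail -f /users/biddisco/stackinator-output.txt
# after the job completes, the dated image should be in $SCRATCH
ls -lh $SCRATCH/clariden-paraview-$(date +%F).squashfs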
Lines changed: 44 additions & 0 deletions
@@ -0,0 +1,44 @@
#!/bin/bash

#SBATCH --job-name=stackinator-paraview
#SBATCH --time=04:00:00
#SBATCH --nodes=1
#SBATCH --partition=nvgpu
#SBATCH --account=csstaff
#SBATCH --output=/users/biddisco/stackinator-output.txt
#SBATCH --error=/users/biddisco/stackinator-error.txt

SRC=/users/biddisco/src
CLUSTER=hohgant
STACKI_DIR=$SRC/alps-vcluster/stackinator
RECIPE_DIR=$SRC/alps-vcluster/alps-spack-stacks/recipes/paraview
SYSTEM_DIR=$SRC/alps-vcluster/alps-cluster-config/$CLUSTER
BUILD_DIR=/dev/shm/biddisco

# cp may be aliased to 'cp -i' in the user's shell init; ignore the error if no alias exists
unalias cp 2>/dev/null || true

echo "Setup/clean build dir"
rm -rf ${BUILD_DIR}/*
mkdir -p ${BUILD_DIR}
mkdir -p ${BUILD_DIR}/tmp

echo "Execute stackinator"
$STACKI_DIR/bin/stack-config -s $SYSTEM_DIR -b ${BUILD_DIR} -r $RECIPE_DIR --debug

echo "Trigger build"
cd ${BUILD_DIR}

# copy the spack build-cache gpg key to the place where bubblewrap will map the /tmp folder
cp $HOME/.ssh/gpg-spack-paraview* ${BUILD_DIR}/tmp/

# build the squashfs image - bubblewrap is used inside the makefile
env --ignore-environment LC_ALL=en_US.UTF-8 PATH=/usr/bin:/bin:${BUILD_DIR}/spack/bin make store.squashfs -j32

echo "Copy generated squashfs file"
DATE=$(date +%F)
cp ${BUILD_DIR}/store.squashfs $SCRATCH/$CLUSTER-paraview-$DATE.squashfs

# -----------------------------------------
# debug : create a shell using the spack setup used to create the squashfs
# -----------------------------------------
# $BUILD_DIR/bwrap-mutable-root.sh --tmpfs ~ --bind $BUILD_DIR/tmp /tmp --bind $BUILD_DIR/store /user-environment env --ignore-environment PATH=/usr/bin:/bin:${BUILD_DIR}/spack/bin SPACK_SYSTEM_CONFIG_PATH=/user-environment/config SPACK_USER_CACHE_PATH=$BUILD_DIR/cache /bin/bash --norc --noprofile
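
The gpg key copied into ${BUILD_DIR}/tmp above becomes visible as /tmp inside the bubblewrap environment; exactly how the makefile consumes it is not shown in this commit. A hedged sketch of trusting the key by hand from the debug shell described at the end of the script, assuming the public key file is named gpg-spack-paraview.pub:

# inside the bwrap debug shell, where $BUILD_DIR/tmp is mounted as /tmp
spack gpg trust /tmp/gpg-spack-paraview.pub   # import the build-cache signing key
spack gpg list                                # confirm the key is now known to spack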
Lines changed: 38 additions & 0 deletions
@@ -0,0 +1,38 @@
#!/bin/bash

#SBATCH --job-name=stackinator-paraview
#SBATCH --time=04:00:00
#SBATCH --nodes=1
#SBATCH --partition=nvgpu
#SBATCH --account=csstaff
#SBATCH --output=/users/biddisco/stackinator-output.txt
#SBATCH --error=/users/biddisco/stackinator-error.txt

SRC=/users/biddisco/src
CLUSTER=oryx
STACKI_DIR=$SRC/alps-vcluster/stackinator
RECIPE_DIR=$SRC/alps-vcluster/alps-spack-stacks/recipes/paraview/turing
SYSTEM_DIR=$SRC/alps-vcluster/alps-cluster-config/$CLUSTER
BUILD_DIR=/dev/shm/biddisco

echo "Setup/clean build dir"
#rm -rf ${BUILD_DIR}/*
mkdir -p ${BUILD_DIR}
mkdir -p ${BUILD_DIR}/tmp

echo "Execute stackinator"
$STACKI_DIR/bin/stack-config -s $SYSTEM_DIR -b ${BUILD_DIR} -r $RECIPE_DIR -c $RECIPE_DIR/cache-config.yaml --debug

# build the squashfs image - bubblewrap is used inside the makefile
echo "Trigger build"
cd ${BUILD_DIR}
env --ignore-environment PATH=/usr/bin:/bin:$(pwd)/spack/bin make store.squashfs -j32

echo "Copy generated squashfs file"
DATE=$(date +%F)
cp ${BUILD_DIR}/store.squashfs $SCRATCH/$CLUSTER-paraview-$DATE.squashfs

# -----------------------------------------
# debug : create a shell using the spack setup used to create the squashfs
# -----------------------------------------
# $BUILD_DIR/bwrap-mutable-root.sh --tmpfs ~ --bind $BUILD_DIR/tmp /tmp --bind $BUILD_DIR/store /user-environment env --ignore-environment PATH=/usr/bin:/bin:$(pwd)/spack/bin SPACK_SYSTEM_CONFIG_PATH=/user-environment/config /bin/bash --norc --noprofile
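
One way to sanity-check the generated image before copying it to $SCRATCH, assuming squashfs-tools is available on the build node (an assumption, not something this commit sets up):

# list the first entries of the image's file tree without mounting it
unsquashfs -l /dev/shm/biddisco/store.squashfs | head -n 20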
Lines changed: 98 additions & 0 deletions
@@ -0,0 +1,98 @@
#!/bin/bash -l
# updated Thu Dec 1 02:29:09 PM CET 2022 to add v5.11 for Eiger
# updated Tue Nov 15 03:46:08 PM CET 2022 to add v5.11 for daint-gpu
# updated Tue Jun 7 10:26:45 PM CEST 2022 to use generic hostname command
# updated Thu Feb 17 08:49:14 AM CET 2022 for Eiger, version 5.10
# updated Thu Feb 17 15:56:49 CET 2022 for Daint (GPU version), version 5.10
# updated Thu Feb 17 04:06:33 PM CET 2022: Removed version 5.9

# usage
echo ""
echo "Usage : %1:Session name ($1)"
echo "        %2:Job Wall Time ($2)"
echo "        %3:server-num-nodes ($3)"
echo "        %4:server-num-tasks-per-node ($4)"
echo "        %5:server-port ($5)"
echo "        %6:login node ($6)"
echo "        %7:Version number ($7)"
echo "        %8:Queue's name (normal/debug) ($8)"
echo "        %9:Memory per Node (standard or high) ($9)"
echo "        %10:Account (csstaff or other) (${10})"
echo "        %11:Reservation (\"\" or other) (${11})"

# Create a temporary filename to write our launch script into
TEMP_FILE=$(mktemp)

# this enables us to connect to the generic name "daint.cscs.ch" from the client
HOST_NAME=$(hostname).cscs.ch
# HOST_NAME=148.187.134.95

echo "Temporary FileName is :" $TEMP_FILE

nservers=$(( $3 * $4 ))

# Create a job script
echo "#!/bin/bash -l" >> $TEMP_FILE
echo "#SBATCH --job-name=$1" >> $TEMP_FILE
echo "#SBATCH --nodes=$3" >> $TEMP_FILE
echo "#SBATCH --ntasks-per-node=$4" >> $TEMP_FILE
echo "#SBATCH --ntasks=$nservers" >> $TEMP_FILE
echo "#SBATCH --time=$2" >> $TEMP_FILE
echo "#SBATCH --account=${10}" >> $TEMP_FILE
echo "#SBATCH --partition=$8" >> $TEMP_FILE
#echo "#SBATCH --cpus-per-task=256" >> $TEMP_FILE
#echo "#SBATCH --ntasks-per-core=2" >> $TEMP_FILE
#echo "#SBATCH --threads-per-core=2" >> $TEMP_FILE
#echo "#SBATCH --hint=multithread" >> $TEMP_FILE
# if [ "$9" = "high" ]; then
#   echo "#SBATCH --mem=497G" >> $TEMP_FILE
# fi

# only ask for a reservation if in the normal queue and using no more than 5 nodes
if [ "$8" = "normal" ]; then
  if [ ! -z "${11}" ]; then
    if [ ! "$3" -gt "5" ]; then
      echo "#SBATCH --reservation=${11}" >> $TEMP_FILE
    fi
  fi
fi

MACHINE_NAME=clariden
export SPACK_ROOT=$SCRATCH/spack-$MACHINE_NAME
export SPACK_USER_CONFIG_PATH=~/.spack-$MACHINE_NAME
export SPACK_SYSTEM_CONFIG_PATH=/user-environment/config
export SPACK_USER_CACHE_PATH=/user-environment/cache
source $SPACK_ROOT/share/spack/setup-env.sh

# Which rendering backend are we using
if [ "$7" = "clariden-5.11-NVIDIA" ]; then
  # paraview 5.11 nvidia EGL
  SQUASH_IMG=/scratch/aistor/biddisco/clariden-paraview-EGL-2023-08-23.squashfs
  SQUASH_CMD="squashfs-mount $SQUASH_IMG:/user-environment"
  PV_HASH="/sqd4oxb"
  PV_SERVER=$($SQUASH_CMD -- spack location -i paraview $PV_HASH)/bin/pvserver
  echo "Using hash $PV_HASH from squashfs $SQUASH_IMG"
  echo "pvserver : $PV_SERVER"

elif [ "$7" = "clariden-5.11-osmesa" ]; then
  # paraview 5.11 osmesa
  # SQUASH_IMG and SQUASH_CMD must be set to an osmesa squashfs image before this branch can be used
  PV_HASH="/ltilqh4"
  PV_SERVER=$($SQUASH_CMD -- spack location -i paraview $PV_HASH)/bin/pvserver
  OSMESA_PATH=$($SQUASH_CMD -- spack location -i /qadzwvd)/lib
  echo "export LD_LIBRARY_PATH=$OSMESA_PATH:\$LD_LIBRARY_PATH" >> $TEMP_FILE
  echo "echo Library path is \$LD_LIBRARY_PATH" >> $TEMP_FILE

fi

echo "" >> $TEMP_FILE
echo "srun -n $nservers -N $3 --cpu_bind=sockets --uenv-file=$SQUASH_IMG $PV_SERVER --reverse-connection --client-host=$HOST_NAME --server-port=$5" >> $TEMP_FILE

cat $TEMP_FILE

# submit the job

sbatch $TEMP_FILE

# wipe the temp file
#rm $TEMP_FILE
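
As an illustration of the positional arguments echoed at the top of the script, a hypothetical invocation (the script name and all values are examples, not taken from this commit):

# args: session-name wall-time nodes tasks-per-node port login-node version queue memory account reservation
./rc-submit-pvserver.sh paraview-test 01:00:00 2 4 11111 daint.cscs.ch \
    clariden-5.11-NVIDIA normal standard csstaff ""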
