From 1bc3f759533764f136042711aff4683ea3631fb3 Mon Sep 17 00:00:00 2001 From: David Hempston Date: Thu, 11 Dec 2025 14:40:25 +0000 Subject: [PATCH 1/3] Revise documentation for Imperial CX3 HPC Configuration Updated documentation for the Imperial CX3 HPC configuration, including changes to installation instructions and account requirements. --- docs/imperial.md | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/docs/imperial.md b/docs/imperial.md index 275186ca3..0612e1e3c 100644 --- a/docs/imperial.md +++ b/docs/imperial.md @@ -1,18 +1,19 @@ -# nf-core/configs: Imperial CX1 HPC Configuration +# nf-core/configs: Imperial CX3 HPC Configuration -All nf-core pipelines have been successfully configured for use on the CX1 cluster at Imperial College London HPC. +All nf-core pipelines have been successfully configured for use on the CX3 cluster at Imperial College London HPC. -To use, run the pipeline with `-profile imperial,standard`. This will download and launch the [`imperial.config`](../conf/imperial.config) which has been pre-configured with a setup suitable for the CX1 cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline. +To use, run the pipeline with `-profile imperial,standard`. This will download and launch the [`imperial.config`](../conf/imperial.config) which has been pre-configured with a setup suitable for the CX3 cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline. -Before running the pipeline you will need to load Nextflow using the environment module system on the CX1 cluster. You can do this by issuing the commands below: +Before running the pipeline you will need to install Nextflow into a conda environment. 
The instructions below are taken from the [`RCS guidance on using conda`](https://icl-rcs-user-guide.readthedocs.io/en/latest/hpc/applications/guides/conda/) ```bash ## Load Nextflow and Singularity environment modules module load anaconda3/personal -conda install -c bioconda nextflow +miniforge-setup +eval "$(~/miniforge3/bin/conda shell.bash hook)" ``` -> NB: You will need an account to use the HPC cluster CX1 in order to run the pipeline. If in doubt contact IT. -> NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT. +> NB: You will need an Imperial account to use any HPC cluster managed by the RCS team. If in doubt contact the [`RCS team`](https://icl-rcs-user-guide.readthedocs.io/en/latest/support/) +> NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. > NB: To submit jobs to the Imperial College MEDBIO cluster, use `-profile imperial,medbio` instead. > NB: You will need a restricted access account to use the HPC cluster MEDBIO. From 79ab20e76c9955bc223f03010737596730decb7f Mon Sep 17 00:00:00 2001 From: David Hempston Date: Thu, 11 Dec 2025 14:51:29 +0000 Subject: [PATCH 2/3] Reduce resource limits in imperial.config --- conf/imperial.config | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/conf/imperial.config b/conf/imperial.config index a311c48f2..9bd7445a3 100644 --- a/conf/imperial.config +++ b/conf/imperial.config @@ -8,15 +8,15 @@ params { // Resources max_memory = 920.GB - max_cpus = 256 - max_time = 1000.h + max_cpus = 128 + max_time = 72.h } process { resourceLimits = [ memory: 920.GB, - cpus: 256, - time: 1000.h + cpus: 128, + time: 72.h ] } @@ -81,7 +81,6 @@ profiles { ? '--nv --env CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES' : (workflow.containerEngine == "docker" ? 
'--gpus all' : null) } - beforeScript = 'module load tools/prod' } } } @@ -109,5 +108,5 @@ executor { singularity { enabled = true autoMounts = true - runOptions = "-B /rds/,/rds/general/user/${USER}/ephemeral/tmp/:/tmp,/rds/general/user/${USER}/ephemeral/tmp/:/var/tmp" + runOptions = "-B /rds/,${TMPDIR}:/tmp,${TMPDIR}:/var/tmp" } From 801055a283479e10ddddb1659e2db00abe5a8904 Mon Sep 17 00:00:00 2001 From: David Hempston Date: Fri, 12 Dec 2025 10:45:40 +0000 Subject: [PATCH 3/3] load miniforge and create env --- docs/imperial.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/imperial.md b/docs/imperial.md index 0612e1e3c..ca170ca6e 100644 --- a/docs/imperial.md +++ b/docs/imperial.md @@ -8,9 +8,10 @@ Before running the pipeline you will need to install Nextflow into a conda envir ```bash ## Load Nextflow and Singularity environment modules -module load anaconda3/personal +module load miniforge/3 miniforge-setup eval "$(~/miniforge3/bin/conda shell.bash hook)" +conda create -n nextflow -c bioconda nextflow ``` > NB: You will need an Imperial account to use any HPC cluster managed by the RCS team. If in doubt contact the [`RCS team`](https://icl-rcs-user-guide.readthedocs.io/en/latest/support/)