Skip to content

Commit 2c30cf5

Browse files
authored
Merge branch 'nf-core:master' into master
2 parents 88aa739 + 5d15b1c commit 2c30cf5

26 files changed

+226
-542
lines changed

.github/workflows/main.yml

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,6 @@ jobs:
4646
- "bi"
4747
- "bigpurple"
4848
- "bih"
49-
- "binac"
5049
- "binac2"
5150
- "biohpc_gen"
5251
- "biowulf"
@@ -138,7 +137,6 @@ jobs:
138137
- "pawsey_nimbus"
139138
- "pawsey_setonix"
140139
- "pdc_kth"
141-
- "pe2"
142140
- "phoenix"
143141
- "psmn"
144142
- "qmul_apocrita"

conf/bi.config

Lines changed: 11 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,17 @@
1-
params{
1+
// Set parameters to ignore for validation
2+
validation {
3+
ignoreParams = ['bi_globalConfig']
4+
}
5+
6+
params {
27
config_profile_description = 'Boehringer Ingelheim internal profile provided by nf-core/configs.'
38
config_profile_contact = 'Alexander Peltzer (@apeltzer)'
49
config_profile_url = 'https://www.boehringer-ingelheim.com/'
510
}
611

7-
params.globalConfig = System.getenv('NXF_GLOBAL_CONFIG')
8-
if(params.globalConfig == null)
9-
{
10-
def errorMessage = "WARNING: For bi.config requires NXF_GLOBAL_CONFIG env var to be set. Point it to global.config file if you want to use this profile."
11-
System.err.println(errorMessage)
12-
}else{
13-
includeConfig params.globalConfig
12+
params.bi_globalConfig = System.getenv('NXF_GLOBAL_CONFIG')
13+
if (params.bi_globalConfig == null) {
14+
System.err.println("WARNING: bi.config requires the NXF_GLOBAL_CONFIG env var to be set. Point it to a global.config file if you want to use this profile.")
15+
} else {
16+
includeConfig params.bi_globalConfig
1417
}

conf/bigpurple.config

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,9 @@
1-
singularityDir = "/gpfs/scratch/${USER}/singularity_images_nextflow"
2-
31
params {
42
config_profile_description = """
53
NYU School of Medicine BigPurple cluster profile provided by nf-core/configs.
64
module load both singularity/3.1 and squashfs-tools/4.3 before running the pipeline with this profile!!
75
Run from your scratch or lab directory - Nextflow makes a lot of files!!
8-
Also consider running the pipeline on a compute node (srun --pty /bin/bash -t=01:00:00) the first time, as it will be pulling the docker image, which will be converted into a singularity image, which is heavy on the login node and will take some time. Subsequent runs can be done on the login node, as the docker image will only be pulled and converted once. By default the images will be stored in ${singularityDir}
6+
Also consider running the pipeline on a compute node (srun --pty /bin/bash -t=01:00:00) the first time, as it will be pulling the docker image, which will be converted into a singularity image, which is heavy on the login node and will take some time. Subsequent runs can be done on the login node, as the docker image will only be pulled and converted once. By default the images will be stored in /gpfs/scratch/${System.getenv("USER")}/singularity_images_nextflow
97
""".stripIndent()
108
config_profile_contact = 'Tobias Schraink (@tobsecret)'
119
config_profile_url = 'https://github.com/nf-core/configs/blob/master/docs/bigpurple.md'
@@ -14,7 +12,7 @@ params {
1412
singularity {
1513
enabled = true
1614
autoMounts = true
17-
cacheDir = singularityDir
15+
cacheDir = "/gpfs/scratch/${System.getenv("USER")}/singularity_images_nextflow"
1816
}
1917

2018
process {

conf/binac.config

Lines changed: 0 additions & 30 deletions
This file was deleted.

conf/binac2.config

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ profiles {
2222
enabled = true
2323
autoMounts = true
2424
pullTimeout = '120m'
25-
cacheDir = "/pfs/10/project/apptainer_cache/${USER}"
25+
cacheDir = "/pfs/10/project/apptainer_cache/${System.getenv('USER')}"
2626
envWhitelist = 'CUDA_VISIBLE_DEVICES'
2727
}
2828

conf/cannon.config

Lines changed: 43 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -7,35 +7,63 @@ params{
77
max_time = 14.d
88
}
99

10+
// disable this if you want to use conda environments instead
11+
// docker is not supported on the Cannon cluster
1012
singularity {
1113
enabled = true
1214
autoMounts = true
1315
}
1416

15-
process {
16-
executor = 'slurm'
17+
executor {
18+
name = 'slurm'
1719
queueSize = 2000
1820
submitRateLimit = '10/sec'
21+
}
22+
23+
process {
24+
executor = 'slurm'
1925
resourceLimits = [
2026
memory: 2000.GB,
2127
cpus: 112,
2228
time: 14.d
2329
]
30+
// default time, cpu, and memory for processes that do not specify these resources
31+
// These will be overridden by any process that specifies its own
32+
// see Running Jobs https://docs.rc.fas.harvard.edu/kb/running-jobs/
33+
time = 10.m // time is required or job won't run
34+
cpus = 1
35+
memory = 100.MB
36+
2437
scratch = true
2538
queue = {
26-
switch (true) {
27-
case { task.memory >= 1000.GB && task.time >= 3.d }:
28-
return 'bigmem_intermediate'
29-
case { task.memory >= 1000.GB }:
30-
return 'bigmem'
31-
case { task.memory >= 184.GB && task.time >= 3.d }:
32-
return 'intermediate'
33-
case { task.memory >= 184.GB }:
34-
return 'sapphire'
35-
case { task.time >= 3.d }:
36-
return 'intermediate'
37-
default:
38-
return 'shared'
39+
// GPU handling
40+
if (task.ext.gpu == true) {
41+
if (task.ext.gpu_type == 'h200') {
42+
return 'gpu_h200'
43+
} else {
44+
return 'gpu'
45+
}
46+
}
47+
48+
def mem = task.memory
49+
def t = task.time
50+
51+
if (t > 3.d) {
52+
// Long jobs
53+
if (mem > 990.GB) {
54+
return 'bigmem_intermediate' // limit 14d, 2000 GB
55+
} else {
56+
return 'intermediate' // limit 14d, 990 GB
57+
}
58+
} else {
59+
// ≤ 3d jobs
60+
if (mem > 990.GB) {
61+
return 'bigmem' // limit 3d, 1988 GB
62+
} else if (mem > 184.GB) {
63+
return 'sapphire' // limit 3d, 990 GB
64+
} else {
65+
return 'shared' // largest pool for small jobs, limit 3d, 184 GB
66+
}
3967
}
4068
}
4169
}

conf/ceres.config

Lines changed: 6 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,11 @@
11
params {
22
config_profile_description = 'USDA ARS SCINet Ceres Cluster profile'
3-
config_profile_contact = 'Thomas A. Christensen II (@MillironX)'
4-
config_profile_url = 'https://scinet.usda.gov/guide/ceres/'
3+
config_profile_contact = 'SCINet VRSC (scinet_vrsc@usda.gov)'
4+
config_profile_url = 'https://scinet.usda.gov/guides/resources/ceres'
55

66
max_memory = 640.GB
77
max_cpus = 36
8-
max_time = 60.d
8+
max_time = 21.d
99
}
1010

1111
singularity {
@@ -17,30 +17,10 @@ process {
1717
resourceLimits = [
1818
memory: 640.GB,
1919
cpus: 36,
20-
time: 60.d
20+
time: 21.d
2121
]
2222
executor = 'slurm'
2323
scratch = true
24-
queue = {
25-
switch (task.memory) {
26-
case { it >= 216.GB }:
27-
switch (task.time) {
28-
case { it >= 7.d }:
29-
return 'longmem'
30-
default:
31-
return 'mem'
32-
}
33-
default:
34-
switch (task.time) {
35-
case { it >= 21.d }:
36-
return 'long60'
37-
case { it >= 7.d }:
38-
return 'long'
39-
case { it >= 48.h }:
40-
return 'medium'
41-
default:
42-
return 'short'
43-
}
44-
}
45-
}
24+
queue = 'ceres'
25+
clusterOptions = " -A ${System.getenv('SLURM_JOB_ACCOUNT')} "
4626
}

conf/daisybio.config

Lines changed: 25 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -9,20 +9,24 @@ params {
99
}
1010

1111
// define workDir in /nfs/scratch/nf-core_work/ named after the launch dir
12-
def work_dir = "/nfs/scratch/nf-core_work/"
13-
if(new File(work_dir).exists() && System.getenv("PWD")) {
14-
work_dir = work_dir+System.getenv("PWD").tokenize('/').join('.')
15-
workDir = work_dir
12+
workDir = {
13+
def work_dir = "/nfs/scratch/nf-core_work/"
14+
if(new File(work_dir).exists() && System.getenv("PWD")) {
15+
work_dir = work_dir+System.getenv("PWD").tokenize('/').join('.')
1616

17-
// if directory does not exist, create it and set the group to the group launch dir
18-
if(!new File(work_dir).exists()) {
19-
"mkdir -p ${work_dir}".execute()
20-
def pwd = System.getenv("PWD")
21-
def group = "stat -c %g ${pwd}".execute().text.trim()
22-
"chgrp -R ${group} ${work_dir}".execute()
23-
"chmod -R g+s ${work_dir}".execute()
17+
// if directory does not exist, create it and set the group to the group launch dir
18+
if(!new File(work_dir).exists()) {
19+
"mkdir -p ${work_dir}".execute()
20+
def pwd = System.getenv("PWD")
21+
def group = "stat -c %g ${pwd}".execute().text.trim()
22+
"chgrp -R ${group} ${work_dir}".execute()
23+
"chmod -R g+s ${work_dir}".execute()
24+
}
25+
return work_dir
26+
} else {
27+
return "work"
2428
}
25-
}
29+
}.call()
2630

2731
process {
2832
resourceLimits = [
@@ -75,6 +79,15 @@ profiles {
7579
apptainer.cacheDir = '/nfs/scratch/apptainer_cache'
7680
}
7781

82+
// profile for gpu queue
83+
gpu {
84+
docker.runOptions = '-u $(id -u):$(id -g) --gpus all'
85+
apptainer.runOptions = '--nv'
86+
singularity.runOptions = '--nv'
87+
process.queue = 'shared-gpu'
88+
process.clusterOptions = '--qos=limitgpus --gpus=a40:1 --exclude compms-gpu-1.exbio.wzw.tum.de'
89+
executor.queueSize = 5
90+
}
7891
}
7992

8093

conf/dirac.config

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
params {
88
config_profile_description = "Novo Nordisk's Dirac cluster profile"
99
config_profile_contact = 'Ashot Margaryan (ashotmarg2004@gmail.com), Vincent Aaskov (vincentrose88@gmail.com)'
10+
hpc_user = System.getenv('USER')
1011
}
1112

1213
process {
@@ -18,7 +19,7 @@ process {
1819
executor = 'slurm'
1920
queue = 'compute'
2021
maxRetries = 2
21-
scratch = "/scratch/${USER}"
22+
scratch = "/scratch/${params.hpc_user}"
2223
}
2324

2425
executor {
@@ -30,7 +31,7 @@ apptainer {
3031
enabled = true
3132
autoMounts = true
3233
ociAutoPull = true
33-
cacheDir = "/scratch/${USER}"
34+
cacheDir = "/scratch/${params.hpc_user}"
3435
pullTimeout = '600 min'
3536
runOptions = "--no-mount hostfs"
3637
}

conf/einstein.config

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
params {
88
config_profile_description = "Novo Nordisk's Einstein cluster profile"
99
config_profile_contact = 'Ashot Margaryan (ashotmarg2004@gmail.com)'
10+
hpc_user = System.getenv('USER')
1011
max_memory = "750.GB"
1112
max_cpus = 64
1213
max_time = "240.h"
@@ -21,7 +22,7 @@ process {
2122
executor = 'slurm'
2223
scratch = true
2324
maxRetries = 3
24-
scratch = "/scratch/users/${USER}"
25+
scratch = "/scratch/users/${params.hpc_user}"
2526
}
2627

2728
executor {
@@ -32,6 +33,6 @@ executor {
3233
apptainer {
3334
enabled = true
3435
autoMounts = true
35-
cacheDir = "/scratch/users/${USER}"
36+
cacheDir = "/scratch/users/${params.hpc_user}"
3637
pullTimeout = '600 min'
3738
}

0 commit comments

Comments (0)