diff --git a/CHANGELOG.md b/CHANGELOG.md index 2647dee3..d18738bf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Addition of `params.multi_mapping` to change the level of multi-mapping filtering performed by PretextMap. - This corresponds to the mapq (mapping quality) value. -- Updated `trace` scope to start collecting SummaryStat data again. +- Updated `trace` scope to start collecting SummaryStat data again. +- Added a `production` profile: `production { includeConfig 'conf/production.config' }`. ### Paramters diff --git a/modules/local/cram/filter_align_bwamem2_fixmate_sort/main.nf b/modules/local/cram/filter_align_bwamem2_fixmate_sort/main.nf index 5a5d0dac..3c663ac8 100644 --- a/modules/local/cram/filter_align_bwamem2_fixmate_sort/main.nf +++ b/modules/local/cram/filter_align_bwamem2_fixmate_sort/main.nf @@ -18,9 +18,9 @@ process CRAM_FILTER_ALIGN_BWAMEM2_FIXMATE_SORT { task.ext.when == null || task.ext.when script: - def args = task.ext.args ?: '' + def _args = task.ext.args ?: '' def args1 = task.ext.args1 ?: '' - def args2 = task.ext.args2 ?: '' + def _args2 = task.ext.args2 ?: '' def args3 = task.ext.args3 ?: '' def args4 = task.ext.args4 ?: '' def prefix = task.ext.prefix ?: "${meta.id}" @@ -43,8 +43,6 @@ process CRAM_FILTER_ALIGN_BWAMEM2_FIXMATE_SORT { stub: def prefix = task.ext.prefix ?: "${meta.id}" - def base = "45022_3#2" - def chunkid = "1" """ - touch ${prefix}_${base}_${chunkid}_mem.bam + touch ${prefix}_mem.bam diff --git a/modules/local/cram/filter_minimap2_filter5end_fixmate_sort/main.nf b/modules/local/cram/filter_minimap2_filter5end_fixmate_sort/main.nf index 74de35de..7595359d 100644 --- a/modules/local/cram/filter_minimap2_filter5end_fixmate_sort/main.nf +++ b/modules/local/cram/filter_minimap2_filter5end_fixmate_sort/main.nf @@ -18,7 +18,7 @@ process CRAM_FILTER_MINIMAP2_FILTER5END_FIXMATE_SORT { task.ext.when == null || task.ext.when script: - def args = task.ext.args ?: '' + def _args = task.ext.args 
?: '' def args1 = task.ext.args1 ?: '' def args2 = task.ext.args2 ?: '' def args3 = task.ext.args3 ?: '' @@ -44,8 +44,6 @@ process CRAM_FILTER_MINIMAP2_FILTER5END_FIXMATE_SORT { stub: def prefix = task.ext.prefix ?: "${meta.id}" - def base = "45022_3#2" - def chunkid = "1" """ - touch ${prefix}_${base}_${chunkid}_mm.bam + touch ${prefix}_mm.bam diff --git a/modules/local/find/telomere_windows/main.nf b/modules/local/find/telomere_windows/main.nf index 134ce51d..925df075 100644 --- a/modules/local/find/telomere_windows/main.nf +++ b/modules/local/find/telomere_windows/main.nf @@ -22,7 +22,7 @@ process FIND_TELOMERE_WINDOWS { def VERSION = "1.0" // WARN: Version information not provided by tool on CLI. Please update this string when bumping container versions. def telomere_jar = task.ext.telomere_jar ?: '' def telomere_jvm_params = task.ext.telomere_jvm_params ?: '' - def telomere_window_cut = task.ext.telomere_window_cut ?: 99.9 + def telomere_window_cut = task.ext.telomere_window_cut ?: "99.9" """ java ${telomere_jvm_params} -cp ${projectDir}/bin/${telomere_jar} FindTelomereWindows $file $telomere_window_cut > ${prefix}.windows @@ -35,7 +35,6 @@ process FIND_TELOMERE_WINDOWS { stub: def prefix = task.ext.prefix ?: "${meta.id}" def VERSION = "1.0" // WARN: Version information not provided by tool on CLI. Please update this string when bumping container versions. 
- def telomere = task.ext.telomere ?: '' """ touch ${prefix}.windows diff --git a/modules/local/gawk_split_directions/main.nf b/modules/local/gawk_split_directions/main.nf index 29b4af8a..8fd2bbd4 100644 --- a/modules/local/gawk_split_directions/main.nf +++ b/modules/local/gawk_split_directions/main.nf @@ -23,12 +23,12 @@ process GAWK_SPLIT_DIRECTIONS { def args = task.ext.args ?: '' // args is used for the main arguments of the tool def args2 = task.ext.args2 ?: '' // args2 is used to specify a program when no program file has been given prefix = task.ext.prefix ?: "${meta.id}" - suffix = task.ext.suffix ?: "${input.collect{ it.getExtension()}.get(0)}" // use the first extension of the input files + suffix = task.ext.suffix ?: "${input.collect{ file -> file.getExtension()}.get(0)}" // use the first extension of the input files program = program_file ? "-f ${program_file}" : "${args2}" - input.collect{ - assert it.name != "${prefix}.${suffix}" : "Input and output names are the same, set prefix in module configuration to disambiguate!" + input.collect{ file -> + assert file.name != "${prefix}.${suffix}" : "Input and output names are the same, set prefix in module configuration to disambiguate!" } """ diff --git a/modules/nf-core/pretextsnapshot/main.nf b/modules/nf-core/pretextsnapshot/main.nf index 76cd89df..07787a6b 100644 --- a/modules/nf-core/pretextsnapshot/main.nf +++ b/modules/nf-core/pretextsnapshot/main.nf @@ -18,7 +18,6 @@ process PRETEXTSNAPSHOT { task.ext.when == null || task.ext.when script: - def VERSION = "0.0.4" def args = task.ext.args ?: '' def prefix = task.ext.prefix ?: "${meta.id}." 
""" @@ -42,7 +41,7 @@ process PRETEXTSNAPSHOT { cat <<-END_VERSIONS > versions.yml "${task.process}": - pretextsnapshot: $VERSION + pretextsnapshot: \$(echo \$(PretextSnapshot --version 2>&1) | sed 's/^.*PretextSnapshot Version //' ) END_VERSIONS """ } diff --git a/nextflow.config b/nextflow.config index 4ac84afa..bc2a7e3f 100644 --- a/nextflow.config +++ b/nextflow.config @@ -34,9 +34,9 @@ params { plaintext_email = false monochrome_logs = false hook_url = null - help = false - help_full = false - show_hidden = false + help = false + help_full = false + show_hidden = false version = false validate_params = true @@ -217,7 +217,7 @@ process.shell = [ // Disable process selector warnings by default. Use debug profile to enable warnings. nextflow.enable.configProcessNamesValidation = false -def trace_timestamp = new java.util.Date().format( 'yyyy-MM-dd_HH-mm-ss' ) +//def trace_timestamp = new java.util.Date().format( 'yyyy-MM-dd_HH-mm-ss' ) timeline { enabled = true file = "${params.outdir}/pipeline_info/execution_timeline_${params.trace_report_suffix}.html" @@ -293,31 +293,31 @@ validation { command = "nextflow run sanger-tol/curationpretext -profile --input samplesheet.csv --outdir " fullParameter = "help_full" showHiddenParameter = "show_hidden" - beforeText = """ --\033[2m----------------------------------------------------\033[0m- -\033[0;34m _____ \033[0;32m _______ \033[0;31m _\033[0m -\033[0;34m / ____| \033[0;32m|__ __| \033[0;31m| |\033[0m -\033[0;34m | (___ __ _ _ __ __ _ ___ _ __ \033[0m ___ \033[0;32m| |\033[0;33m ___ \033[0;31m| |\033[0m -\033[0;34m \\___ \\ / _` | '_ \\ / _` |/ _ \\ '__|\033[0m|___|\033[0;32m| |\033[0;33m/ _ \\\033[0;31m| |\033[0m -\033[0;34m ____) | (_| | | | | (_| | __/ | \033[0;32m| |\033[0;33m (_) \033[0;31m| |____\033[0m -\033[0;34m |_____/ \\__,_|_| |_|\\__, |\\___|_| \033[0;32m|_|\033[0;33m\\___/\033[0;31m|______|\033[0m -\033[0;34m __/ |\033[0m -\033[0;34m |___/\033[0m -\033[0;35m ${manifest.name} ${manifest.version}\033[0m 
--\033[2m----------------------------------------------------\033[0m- -""" - afterText = """${manifest.doi ? "\n* The pipeline\n" : ""}${manifest.doi.tokenize(",").collect { " https://doi.org/${it.trim().replace('https://doi.org/','')}"}.join("\n")}${manifest.doi ? "\n" : ""} -* The nf-core framework - https://doi.org/10.1038/s41587-020-0439-x -* Software dependencies - https://github.com/sanger-tol/curationpretext/blob/main/CITATIONS.md -""" +// beforeText = """ +// -\033[2m----------------------------------------------------\033[0m- +// \033[0;34m _____ \033[0;32m _______ \033[0;31m _\033[0m +// \033[0;34m / ____| \033[0;32m|__ __| \033[0;31m| |\033[0m +// \033[0;34m | (___ __ _ _ __ __ _ ___ _ __ \033[0m ___ \033[0;32m| |\033[0;33m ___ \033[0;31m| |\033[0m +// \033[0;34m \\___ \\ / _` | '_ \\ / _` |/ _ \\ '__|\033[0m|___|\033[0;32m| |\033[0;33m/ _ \\\033[0;31m| |\033[0m +// \033[0;34m ____) | (_| | | | | (_| | __/ | \033[0;32m| |\033[0;33m (_) \033[0;31m| |____\033[0m +// \033[0;34m |_____/ \\__,_|_| |_|\\__, |\\___|_| \033[0;32m|_|\033[0;33m\\___/\033[0;31m|______|\033[0m +// \033[0;34m __/ |\033[0m +// \033[0;34m |___/\033[0m +// \033[0;35m ${manifest.name} ${manifest.version}\033[0m +// -\033[2m----------------------------------------------------\033[0m- +// """ +// afterText = """${manifest.doi ? "\n* The pipeline\n" : ""}${manifest.doi.tokenize(",").collect { " https://doi.org/${it.trim().replace('https://doi.org/','')}"}.join("\n")}${manifest.doi ? 
"\n" : ""} +// * The nf-core framework +// https://doi.org/10.1038/s41587-020-0439-x +// * Software dependencies +// https://github.com/sanger-tol/curationpretext/blob/main/CITATIONS.md +// """ } - summary { - beforeText = validation.help.beforeText - afterText = validation.help.afterText - } + // summary { + // beforeText = validation.help.beforeText + // afterText = validation.help.afterText + // } } // Load modules.config for DSL2 module specific options diff --git a/subworkflows/local/accessory_files/main.nf b/subworkflows/local/accessory_files/main.nf index 1f04bb04..2daabd0c 100644 --- a/subworkflows/local/accessory_files/main.nf +++ b/subworkflows/local/accessory_files/main.nf @@ -19,8 +19,8 @@ workflow ACCESSORY_FILES { main: - ch_versions = Channel.empty() - ch_empty_file = Channel.fromPath("${baseDir}/assets/EMPTY.txt") + ch_versions = channel.empty() + ch_empty_file = channel.fromPath("${baseDir}/assets/EMPTY.txt") // // NOTE: THIS IS DUPLICATED IN THE CURATIONPRETEXT WORKFLOW, diff --git a/subworkflows/local/gap_finder/main.nf b/subworkflows/local/gap_finder/main.nf index 10ca907c..d2771449 100644 --- a/subworkflows/local/gap_finder/main.nf +++ b/subworkflows/local/gap_finder/main.nf @@ -11,7 +11,7 @@ workflow GAP_FINDER { reference_tuple // Channel [ val(meta), path(fasta) ] main: - ch_versions = Channel.empty() + ch_versions = channel.empty() // // MODULE: GENERATES A GAP SUMMARY FILE diff --git a/subworkflows/local/generate_maps/main.nf b/subworkflows/local/generate_maps/main.nf index 413ea356..383115c2 100644 --- a/subworkflows/local/generate_maps/main.nf +++ b/subworkflows/local/generate_maps/main.nf @@ -20,7 +20,7 @@ workflow GENERATE_MAPS { main: - ch_versions = Channel.empty() + ch_versions = channel.empty() // // MODULE: generate a cram csv file containing the required parametres for CRAM_FILTER_ALIGN_BWAMEM2_FIXMATE_SORT @@ -80,7 +80,7 @@ workflow GENERATE_MAPS { hires_pretext = PRETEXTMAP_HIGHRES.out.pretext ch_versions = ch_versions.mix( 
PRETEXTMAP_HIGHRES.out.versions ) } else { - hires_pretext = Channel.empty() + hires_pretext = channel.empty() } // diff --git a/subworkflows/local/hic_bwamem2/main.nf b/subworkflows/local/hic_bwamem2/main.nf index 8f44feee..1fe65ca8 100644 --- a/subworkflows/local/hic_bwamem2/main.nf +++ b/subworkflows/local/hic_bwamem2/main.nf @@ -19,8 +19,7 @@ workflow HIC_BWAMEM2 { reference_index // Channel: tuple [ val(meta), path( fai ) ] main: - ch_versions = Channel.empty() - mappedbam_ch = Channel.empty() + ch_versions = channel.empty() // // MODULE: Indexing on reference output the folder of indexing files diff --git a/subworkflows/local/hic_minimap2/main.nf b/subworkflows/local/hic_minimap2/main.nf index c9ccfaae..d72396c4 100644 --- a/subworkflows/local/hic_minimap2/main.nf +++ b/subworkflows/local/hic_minimap2/main.nf @@ -21,8 +21,8 @@ workflow HIC_MINIMAP2 { reference_index main: - ch_versions = Channel.empty() - mappedbam_ch = Channel.empty() + ch_versions = channel.empty() + mappedbam_ch = channel.empty() // diff --git a/subworkflows/local/longread_coverage/main.nf b/subworkflows/local/longread_coverage/main.nf index 9fd1f927..5845ae77 100644 --- a/subworkflows/local/longread_coverage/main.nf +++ b/subworkflows/local/longread_coverage/main.nf @@ -17,12 +17,12 @@ workflow LONGREAD_COVERAGE { take: reference_tuple // Channel: [ val(meta), path( reference_file ) ] - reference_index // Channel: [ val(meta), path( reference_indx ) ] + _reference_index // Channel: [ val(meta), path( reference_indx ) ] dot_genome // Channel: [ val(meta), [ path( datafile ) ] ] reads_path // Channel: [ val(meta), path( str ) ] main: - ch_versions = Channel.empty() + ch_versions = channel.empty() // // LOGIC: TAKE THE READ FOLDER AS INPUT AND GENERATE THE CHANNEL OF READ FILES @@ -107,7 +107,7 @@ workflow LONGREAD_COVERAGE { // BEDTOOLS_BAMTOBED.out.bed .combine( dot_genome ) - .multiMap { meta, file, my_genome_meta, my_genome -> + .multiMap { meta, file, _my_genome_meta, my_genome -> 
input_tuple : tuple ( [ id : meta.id, single_end : true ], @@ -147,7 +147,7 @@ workflow LONGREAD_COVERAGE { GNU_SORT.out.sorted .combine( dot_genome ) .combine( reference_tuple ) - .multiMap { meta, file, meta_my_genome, my_genome, ref_meta, ref -> + .multiMap { _meta, file, _meta_my_genome, my_genome, ref_meta, _ref -> ch_coverage_bed : tuple ( [ id: ref_meta.id, single_end: true diff --git a/subworkflows/local/repeat_density/main.nf b/subworkflows/local/repeat_density/main.nf index ce4400d2..46b2c1ee 100644 --- a/subworkflows/local/repeat_density/main.nf +++ b/subworkflows/local/repeat_density/main.nf @@ -24,7 +24,7 @@ workflow REPEAT_DENSITY { dot_genome main: - ch_versions = Channel.empty() + ch_versions = channel.empty() // @@ -156,7 +156,7 @@ workflow REPEAT_DENSITY { // UCSC_BEDGRAPHTOBIGWIG( GAWK_REPLACE_DOTS.out.output, - GNU_SORT_B.out.sorted.map { it[1] } + GNU_SORT_B.out.sorted.map { _meta, file -> file } ) ch_versions = ch_versions.mix( UCSC_BEDGRAPHTOBIGWIG.out.versions ) diff --git a/subworkflows/local/telo_extraction/main.nf b/subworkflows/local/telo_extraction/main.nf index e5bfd667..a535db6c 100644 --- a/subworkflows/local/telo_extraction/main.nf +++ b/subworkflows/local/telo_extraction/main.nf @@ -6,7 +6,7 @@ workflow TELO_EXTRACTION { telomere_file //tuple(meta, file) main: - ch_versions = Channel.empty() + ch_versions = channel.empty() // // MODULE: GENERATES A WINDOWS FILE FROM THE ABOVE @@ -18,7 +18,7 @@ workflow TELO_EXTRACTION { def windows_file = FIND_TELOMERE_WINDOWS.out.windows - def safe_windows = windows_file.ifEmpty { Channel.empty() } + def safe_windows = windows_file.ifEmpty { channel.empty() } // // MODULE: Extract the telomere data from the FIND_TELOMERE diff --git a/subworkflows/local/telo_finder/main.nf b/subworkflows/local/telo_finder/main.nf index cdf0d223..74b09da1 100644 --- a/subworkflows/local/telo_finder/main.nf +++ b/subworkflows/local/telo_finder/main.nf @@ -15,7 +15,7 @@ workflow TELO_FINDER { teloseq main: - 
ch_versions = Channel.empty() + ch_versions = channel.empty() // diff --git a/subworkflows/local/utils_nfcore_curationpretext_pipeline/main.nf b/subworkflows/local/utils_nfcore_curationpretext_pipeline/main.nf index a92afd0f..4b16afce 100644 --- a/subworkflows/local/utils_nfcore_curationpretext_pipeline/main.nf +++ b/subworkflows/local/utils_nfcore_curationpretext_pipeline/main.nf @@ -79,13 +79,13 @@ workflow PIPELINE_INITIALISATION { // Create channel from input file provided through params.input // - input_fasta = Channel.fromPath( + input_fasta = channel.fromPath( params.input, checkIfExists: true, type: 'file' ) - cram_dir = Channel.fromPath( + cram_dir = channel.fromPath( params.cram, checkIfExists: true, type: 'dir' @@ -117,21 +117,21 @@ workflow PIPELINE_INITIALISATION { ) } - ch_reads = Channel - .fromPath( - params.reads, - checkIfExists: true, - type: 'dir' - ) - .map { dir -> - tuple( - [ id: params.sample, - single_end: true, - read_type: params.read_type - ], - dir - ) - } + channel.fromPath( + params.reads, + checkIfExists: true, + type: 'dir' + ) + .map { dir -> + tuple( + [ id: params.sample, + single_end: true, + read_type: params.read_type + ], + dir + ) + } + .set { ch_reads } emit: ch_reference diff --git a/workflows/curationpretext.nf b/workflows/curationpretext.nf index ea3db897..cf68d46f 100644 --- a/workflows/curationpretext.nf +++ b/workflows/curationpretext.nf @@ -32,12 +32,12 @@ workflow CURATIONPRETEXT { val_teloseq main: - ch_versions = Channel.empty() - ch_empty_file = Channel.fromPath("${baseDir}/assets/EMPTY.txt") + ch_versions = channel.empty() + ch_empty_file = channel.fromPath("${baseDir}/assets/EMPTY.txt") ch_reference - .branch { meta, file -> + .branch { _meta, file -> zipped: file.name.endsWith('.gz') unzipped: !file.name.endsWith('.gz') } @@ -55,7 +55,7 @@ workflow CURATIONPRETEXT { // // LOGIC: MIX CHANELS WHICH MAY OR MAY NOT BE EMPTY INTO A SINGLE QUEUE CHANNEL // - unzipped_input = Channel.empty() + unzipped_input = 
channel.empty() unzipped_input .mix(ch_input.unzipped, GUNZIP.out.gunzip) @@ -183,7 +183,7 @@ workflow CURATIONPRETEXT { // // Collate and save software versions // - def topic_versions = Channel.topic("versions") + def topic_versions = channel.topic("versions") .distinct() .branch { entry -> versions_file: entry instanceof Path @@ -207,9 +207,9 @@ workflow CURATIONPRETEXT { name: 'sanger-tol_' + 'curationpretext_software_' + 'versions.yml', sort: true, newLine: true - ).set { ch_collated_versions } + ).set { _ch_collated_versions } - summary_params = paramsSummaryMap( + _summary_params = paramsSummaryMap( workflow, parameters_schema: "nextflow_schema.json")