diff --git a/main.nf b/main.nf
index 577df3ab89..462cd13ec0 100755
--- a/main.nf
+++ b/main.nf
@@ -30,7 +30,6 @@ include { ANNOTATION_CACHE_INITIALISATION } from './subworkflows/local/annotatio
 include { DOWNLOAD_CACHE_SNPEFF_VEP } from './subworkflows/local/download_cache_snpeff_vep'
 include { PIPELINE_COMPLETION } from './subworkflows/local/utils_nfcore_sarek_pipeline'
 include { PIPELINE_INITIALISATION } from './subworkflows/local/utils_nfcore_sarek_pipeline'
-include { PREPARE_GENOME } from './subworkflows/local/prepare_genome'
 include { PREPARE_INTERVALS } from './subworkflows/local/prepare_intervals'
 include { PREPARE_REFERENCE_CNVKIT } from './subworkflows/local/prepare_reference_cnvkit'
 /*
@@ -44,11 +43,11 @@ workflow {
     PIPELINE_INITIALISATION(
         params.version,
         params.validate_params,
-        params.monochrome_logs,
         args,
         params.outdir,
         params.input,
        params.references,
+        params.step,
    )

     // WORKFLOW: Run main workflow
diff --git a/nextflow.config b/nextflow.config
index 2dbcff3f92..00f27125be 100644
--- a/nextflow.config
+++ b/nextflow.config
@@ -11,9 +11,9 @@ params {
     // Mandatory arguments

     // Input options
-    input = null // No default input
-    input_restart = null // No default automatic input
-    step = 'mapping' // Starts with mapping
+    input         = null      // No default input
+    input_restart = null      // No default automatic input
+    step          = 'mapping' // Starts with mapping

     // References
     genome = 'GATK/GRCh38'
@@ -22,9 +22,44 @@ params {
     references = "${params.references_config_base}/igenomes/${genome}.yml"
     snpeff_cache = 's3://annotation-cache/snpeff_cache/'
     vep_cache = 's3://annotation-cache/vep_cache/'
-    build_only_index = false // Only build the reference indexes
     download_cache = false // Do not download annotation cache

+    // params for references in yaml file
+    ascat_alleles           = null
+    ascat_genome            = null
+    ascat_loci              = null
+    ascat_loci_gc           = null
+    ascat_loci_rt           = null
+    bwa                     = null
+    bwamem2                 = null
+    cf_chrom_len            = null
+    chr_dir                 = null
+    dbsnp                   = null
+    dbsnp_tbi               = null
+    dbsnp_vqsr              = null
+    dict                    = null
+    dragmap                 = null
+    fasta                   = null
+    fasta_fai               = null
+    germline_resource       = null
+    germline_resource_tbi   = null
+    intervals               = null
+    known_indels            = null
+    known_indels_tbi        = null
+    known_indels_vqsr       = null
+    known_snps              = null
+    known_snps_tbi          = null
+    known_snps_vqsr         = null
+    mappability             = null
+    ngscheckmate_bed        = null
+    pon                     = null
+    pon_tbi                 = null
+    sentieon_dnascope_model = null
+    snpeff_db               = null
+    vep_cache_version       = null
+    vep_genome              = null
+    vep_species             = null
+
     // Main options
     no_intervals = false // Intervals will be built from the fasta file
     nucleotides_per_second = 200000 // Default interval size
@@ -80,32 +115,32 @@ params {
     wes = false // Set to true, if data is exome/targeted sequencing data. Used to use correct models in various variant callers

     // Annotation
-    bcftools_annotations = null // No extra annotation file
-    bcftools_annotations_tbi = null // No extra annotation file index
-    bcftools_header_lines = null // No header lines to be added to the VCF file
-    dbnsfp = null // No dbnsfp processed file
-    dbnsfp_consequence = null // No default consequence for dbnsfp plugin
-    dbnsfp_fields = "rs_dbSNP,HGVSc_VEP,HGVSp_VEP,1000Gp3_EAS_AF,1000Gp3_AMR_AF,LRT_score,GERP++_RS,gnomAD_exomes_AF" // Default fields for dbnsfp plugin
-    dbnsfp_tbi = null // No dbnsfp processed file index
-    outdir_cache = null // No default outdir cache
-    spliceai_indel = null // No spliceai_indel file
-    spliceai_indel_tbi = null // No spliceai_indel file index
-    spliceai_snv = null // No spliceai_snv file
-    spliceai_snv_tbi = null // No spliceai_snv file index
-    vep_custom_args = "--everything --filter_common --per_gene --total_length --offline --format vcf" // Default arguments for VEP
-    vep_dbnsfp = null // dbnsfp plugin disabled within VEP
-    vep_include_fasta = false // Don't use fasta file for annotation with VEP
-    vep_loftee = null // loftee plugin disabled within VEP
-    vep_out_format = "vcf"
-    vep_spliceai = null // spliceai plugin disabled within VEP
-    vep_spliceregion = null // spliceregion plugin disabled within VEP
-    vep_version = "111.0-0" // Should be updated when we update VEP, needs this to get full path to some plugins
+    bcftools_annotations     = null // No extra annotation file
+    bcftools_annotations_tbi = null // No extra annotation file index
+    bcftools_header_lines    = null // No header lines to be added to the VCF file
+    dbnsfp                   = null // No dbnsfp processed file
+    dbnsfp_consequence       = null // No default consequence for dbnsfp plugin
+    dbnsfp_fields            = "rs_dbSNP,HGVSc_VEP,HGVSp_VEP,1000Gp3_EAS_AF,1000Gp3_AMR_AF,LRT_score,GERP++_RS,gnomAD_exomes_AF" // Default fields for dbnsfp plugin
+    dbnsfp_tbi               = null // No dbnsfp processed file index
+    outdir_cache             = null // No default outdir cache
+    spliceai_indel           = null // No spliceai_indel file
+    spliceai_indel_tbi       = null // No spliceai_indel file index
+    spliceai_snv             = null // No spliceai_snv file
+    spliceai_snv_tbi         = null // No spliceai_snv file index
+    vep_custom_args          = "--everything --filter_common --per_gene --total_length --offline --format vcf" // Default arguments for VEP
+    vep_dbnsfp               = null // dbnsfp plugin disabled within VEP
+    vep_include_fasta        = false // Don't use fasta file for annotation with VEP
+    vep_loftee               = null // loftee plugin disabled within VEP
+    vep_out_format           = "vcf"
+    vep_spliceai             = null // spliceai plugin disabled within VEP
+    vep_spliceregion         = null // spliceregion plugin disabled within VEP
+    vep_version              = "111.0-0" // Should be updated when we update VEP, needs this to get full path to some plugins

     // MultiQC options
-    multiqc_config = null
-    multiqc_title = null
-    multiqc_logo = null
-    max_multiqc_email_size = '25.MB'
+    multiqc_config              = null
+    multiqc_title               = null
+    multiqc_logo                = null
+    max_multiqc_email_size      = '25.MB'
     multiqc_methods_description = null

     // Boilerplate options
@@ -389,7 +424,6 @@ plugins {
 }

 validation {
-    defaultIgnoreParams = ["genomes"]
     lenientMode = true
     help {
         enabled = true
diff --git a/nextflow_schema.json b/nextflow_schema.json
index 5cdf35d555..19b6bcea0b 100644
--- a/nextflow_schema.json
+++ b/nextflow_schema.json
@@ -365,7 +365,7 @@
                     "type": "string",
                     "fa_icon": "fas fa-file",
                     "description": "Panel-of-normals VCF (bgzipped) for GATK Mutect2",
-                    "help_text": "Without PON, there will be no calls with PASS in the INFO field, only an unfiltered VCF is written.\nIt is highly recommended to make your own PON, as it depends on sequencer and library preparation.\n\nThe pipeline is shipped with a panel-of-normals for `--genome GATK.GRCh38` provided by [GATK](https://gatk.broadinstitute.org/hc/en-us/articles/360035890631-Panel-of-Normals-PON-). \n\nSee [PON documentation](https://gatk.broadinstitute.org/hc/en-us/articles/360042479112-CreateSomaticPanelOfNormals-BETA)\n> **NB** PON file should be bgzipped."
+                    "help_text": "Without PON, there will be no calls with PASS in the FILTER field, only an unfiltered VCF is written.\nIt is highly recommended to make your own PON, as it depends on sequencer and library preparation.\n\nThe pipeline is shipped with a panel-of-normals for `--genome GATK/GRCh38` provided by [GATK](https://gatk.broadinstitute.org/hc/en-us/articles/360035890631-Panel-of-Normals-PON-). \n\nSee [PON documentation](https://gatk.broadinstitute.org/hc/en-us/articles/360042479112-CreateSomaticPanelOfNormals-BETA)\n> **NB** PON file should be bgzipped."
                 },
                 "pon_tbi": {
                     "type": "string",
@@ -536,10 +536,10 @@
                 }
             }
         },
-        "general_reference_genome_options": {
-            "title": "General reference genome options",
+        "reference_genome_options": {
+            "title": "Reference genome options",
             "type": "object",
-            "description": "General options to interact with reference genomes.",
+            "description": "Reference genome related files and options required for the workflow. If you use AWS iGenomes or nf-core/references, this has already been set for you appropriately.",
             "default": "",
             "properties": {
                 "igenomes_base": {
                     "type": "string",
@@ -549,122 +549,111 @@
                     "fa_icon": "fas fa-ban",
                     "default": "s3://ngi-igenomes/igenomes/"
                 },
-                "igenomes_ignore": {
-                    "type": "boolean",
-                    "description": "Do not load the iGenomes reference config.",
-                    "fa_icon": "fas fa-ban",
-                    "help_text": "Do not load `igenomes.config` when running the pipeline. You may choose this option if you observe clashes between custom parameters and those supplied in `igenomes.config`. **NB** You can then run `Sarek` by specifying at least a FASTA genome file"
+                "genome": {
+                    "type": "string",
+                    "description": "Name of the reference genome in AWS iGenomes or nf-core/references.",
+                    "default": "GATK/GRCh38",
+                    "fa_icon": "fas fa-book",
+                    "help_text": "If using a reference genome configured in the pipeline using AWS iGenomes or nf-core/references, use this parameter to give the ID for the reference. This is then used to build the full paths for all required reference genome files e.g. `--genome GATK/GRCh38`.\n\nSee the [nf-core website docs](https://nf-co.re/usage/reference_genomes) for more details."
                 },
-                "save_reference": {
-                    "type": "boolean",
-                    "fa_icon": "fas fa-download",
-                    "description": "Save built references.",
-                    "help_text": "Set this parameter, if you wish to save all computed reference files. This is useful to avoid re-computation on future runs."
+                "references_config_base": {
+                    "type": "string",
+                    "fa_icon": "fas fa-users-cog",
+                    "description": "Base directory for references YAML files.",
+                    "hidden": true,
+                    "help_text": "If you're running offline, Nextflow will not be able to fetch the yaml files from the internet. If you don't need them, then this is not a problem. If you do need them, you should download the files from the repo and tell Nextflow where to find them with this parameter.",
+                    "default": "https://raw.githubusercontent.com/nf-core/references-assets/main"
                 },
-                "build_only_index": {
-                    "type": "boolean",
-                    "fa_icon": "fas fa-download",
-                    "description": "Only built references.",
-                    "help_text": "Set this parameter, if you wish to compute and save all computed reference files. No alignment or any other downstream steps will be performed."
+                "references": {
+                    "format": "file-path",
+                    "type": "string",
+                    "description": "Path to reference genome YAML file.",
+                    "fa_icon": "fas fa-book",
+                    "help_text": "Use this parameter to specify the path to a YAML reference genome file.\n\nSee the [nf-core website docs](https://nf-co.re/usage/reference_genomes) for more details.",
+                    "default": "/path/to/references"
                 },
                 "download_cache": {
                     "type": "boolean",
                     "fa_icon": "fas fa-download",
                     "description": "Download annotation cache.",
                     "help_text": "Set this parameter, if you wish to download annotation cache.\nUsing this parameter will download cache even if --snpeff_cache and --vep_cache are provided."
-                }
-            },
-            "fa_icon": "fas fa-dna"
-        },
-        "reference_genome_options": {
-            "title": "Reference genome options",
-            "type": "object",
-            "fa_icon": "fas fa-dna",
-            "description": "Reference genome related files and options required for the workflow. If you use AWS iGenomes, this has already been set for you appropriately.",
-            "properties": {
-                "genome": {
-                    "type": "string",
-                    "description": "Name of iGenomes reference.",
-                    "default": "GATK.GRCh38",
-                    "fa_icon": "fas fa-book",
-                    "help_text": "If using a reference genome configured in the pipeline using iGenomes, use this parameter to give the ID for the reference. This is then used to build the full paths for all required reference genome files e.g. `--genome GRCh38`.\n\nSee the [nf-core website docs](https://nf-co.re/usage/reference_genomes) for more details."
                 },
                 "ascat_genome": {
                     "type": "string",
                     "description": "ASCAT genome.",
-                    "help_text": "Must be set to run ASCAT, either hg19 or hg38.\n\nIf you use AWS iGenomes, this has already been set for you appropriately.",
+                    "help_text": "Must be set to run ASCAT, either hg19 or hg38.\n\nIf you use AWS iGenomes or nf-core/references, this has already been set for you appropriately.",
                     "enum": ["hg19", "hg38"]
                 },
                 "ascat_alleles": {
                     "type": "string",
                     "fa_icon": "fas fa-file",
                     "description": "Path to ASCAT allele zip file.",
-                    "help_text": "If you use AWS iGenomes, this has already been set for you appropriately."
+                    "help_text": "If you use AWS iGenomes or nf-core/references, this has already been set for you appropriately."
                 },
                 "ascat_loci": {
                     "type": "string",
                     "fa_icon": "fas fa-file",
                     "description": "Path to ASCAT loci zip file.",
-                    "help_text": "If you use AWS iGenomes, this has already been set for you appropriately."
+                    "help_text": "If you use AWS iGenomes or nf-core/references, this has already been set for you appropriately."
                 },
                 "ascat_loci_gc": {
                     "type": "string",
                     "fa_icon": "fas fa-file",
                     "description": "Path to ASCAT GC content correction file.",
-                    "help_text": "If you use AWS iGenomes, this has already been set for you appropriately."
+                    "help_text": "If you use AWS iGenomes or nf-core/references, this has already been set for you appropriately."
                 },
                 "ascat_loci_rt": {
                     "type": "string",
                     "fa_icon": "fas fa-file",
                     "description": "Path to ASCAT RT (replication timing) correction file.",
-                    "help_text": "If you use AWS iGenomes, this has already been set for you appropriately."
+ "help_text": "If you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." }, "bwa": { "type": "string", "fa_icon": "fas fa-copy", "description": "Path to BWA mem indices.", - "help_text": "If you wish to recompute indices available on igenomes, set `--bwa false`.\n\n> **NB** If none provided, will be generated automatically from the FASTA reference. Combine with `--save_reference` to save for future runs.\n\nIf you use AWS iGenomes, this has already been set for you appropriately." + "help_text": "If you wish to recompute indices available on igenomes, set `--bwa false`.\n\n> **NB** If none provided, will be generated automatically from the FASTA reference. Combine with `--save_reference` to save for future runs.\n\nIf you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." }, "bwamem2": { "type": "string", "fa_icon": "fas fa-copy", "description": "Path to bwa-mem2 mem indices.", - "help_text": "If you use AWS iGenomes, this has already been set for you appropriately.\n\nIf you wish to recompute indices available on igenomes, set `--bwamem2 false`.\n\n> **NB** If none provided, will be generated automatically from the FASTA reference, if `--aligner bwa-mem2` is specified. Combine with `--save_reference` to save for future runs." + "help_text": "If you use AWS iGenomes or nf-core/references, this has already been set for you appropriately.\n\nIf you wish to recompute indices available on igenomes, set `--bwamem2 false`.\n\n> **NB** If none provided, will be generated automatically from the FASTA reference, if `--aligner bwa-mem2` is specified. Combine with `--save_reference` to save for future runs." }, "chr_dir": { "type": "string", "fa_icon": "fas fa-folder-open", "description": "Path to chromosomes folder used with ControLFREEC.", - "help_text": "If you use AWS iGenomes, this has already been set for you appropriately." + "help_text": "If you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." }, "dbsnp": { "type": "string", "fa_icon": "fas fa-file", "description": "Path to dbsnp file.", - "help_text": "If you use AWS iGenomes, this has already been set for you appropriately." + "help_text": "If you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." }, "dbsnp_tbi": { "type": "string", "fa_icon": "fas fa-file", "description": "Path to dbsnp index.", - "help_text": "> **NB** If none provided, will be generated automatically from the dbsnp file. Combine with `--save_reference` to save for future runs.\n\nIf you use AWS iGenomes, this has already been set for you appropriately." + "help_text": "> **NB** If none provided, will be generated automatically from the dbsnp file. Combine with `--save_reference` to save for future runs.\n\nIf you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." }, "dbsnp_vqsr": { "type": "string", "fa_icon": "fas fa-copy", - "description": "Label string for VariantRecalibration (haplotypecaller joint variant calling).\n\nIf you use AWS iGenomes, this has already been set for you appropriately." + "description": "Label string for VariantRecalibration (haplotypecaller joint variant calling).\n\nIf you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." 
}, "dict": { "type": "string", "fa_icon": "fas fa-file", "description": "Path to FASTA dictionary file.", - "help_text": "> **NB** If none provided, will be generated automatically from the FASTA reference. Combine with `--save_reference` to save for future runs.\n\nIf you use AWS iGenomes, this has already been set for you appropriately." + "help_text": "> **NB** If none provided, will be generated automatically from the FASTA reference. Combine with `--save_reference` to save for future runs.\n\nIf you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." }, "dragmap": { "type": "string", "fa_icon": "fas fa-copy", "description": "Path to dragmap indices.", - "help_text": "If you wish to recompute indices available on igenomes, set `--dragmap false`.\n\n> **NB** If none provided, will be generated automatically from the FASTA reference, if `--aligner dragmap` is specified. Combine with `--save_reference` to save for future runs.\n\nIf you use AWS iGenomes, this has already been set for you appropriately." + "help_text": "If you wish to recompute indices available on igenomes, set `--dragmap false`.\n\n> **NB** If none provided, will be generated automatically from the FASTA reference, if `--aligner dragmap` is specified. Combine with `--save_reference` to save for future runs.\n\nIf you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." }, "fasta": { "type": "string", @@ -673,7 +662,7 @@ "mimetype": "text/plain", "pattern": "^\\S+\\.fn?a(sta)?(\\.gz)?$", "description": "Path to FASTA genome file.", - "help_text": "This parameter is *mandatory* if `--genome` is not specified.\n\nIf you use AWS iGenomes, this has already been set for you appropriately.", + "help_text": "This parameter is *mandatory* if `--genome` is not specified.\n\nIf you use AWS iGenomes or nf-core/references, this has already been set for you appropriately.", "fa_icon": "fas fa-file" }, "fasta_fai": { @@ -682,7 +671,7 @@ "format": "file-path", "exists": true, "mimetype": "text/plain", - "help_text": "> **NB** If none provided, will be generated automatically from the FASTA reference. Combine with `--save_reference` to save for future runs.\n\nIf you use AWS iGenomes, this has already been set for you appropriately.", + "help_text": "> **NB** If none provided, will be generated automatically from the FASTA reference. Combine with `--save_reference` to save for future runs.\n\nIf you use AWS iGenomes or nf-core/references, this has already been set for you appropriately.", "description": "Path to FASTA reference index." }, "germline_resource": { @@ -692,7 +681,7 @@ "exists": true, "mimetype": "text/plain", "description": "Path to GATK Mutect2 Germline Resource File.", - "help_text": "The germline resource VCF file (bgzipped and tabixed) needed by GATK4 Mutect2 is a collection of calls that are likely present in the sample, with allele frequencies.\nThe AF info field must be present.\nYou can find a smaller, stripped gnomAD VCF file (most of the annotation is removed and only calls signed by PASS are stored) in the AWS iGenomes Annotation/GermlineResource folder.\n\nIf you use AWS iGenomes, this has already been set for you appropriately." 
+ "help_text": "The germline resource VCF file (bgzipped and tabixed) needed by GATK4 Mutect2 is a collection of calls that are likely present in the sample, with allele frequencies.\nThe AF info field must be present.\nYou can find a smaller, stripped gnomAD VCF file (most of the annotation is removed and only calls signed by PASS are stored) in the AWS iGenomes Annotation/GermlineResource folder.\n\nIf you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." }, "germline_resource_tbi": { "type": "string", @@ -701,7 +690,7 @@ "exists": true, "mimetype": "text/plain", "description": "Path to GATK Mutect2 Germline Resource Index.", - "help_text": "> **NB** If none provided, will be generated automatically from the Germline Resource file, if provided. Combine with `--save_reference` to save for future runs.\n\nIf you use AWS iGenomes, this has already been set for you appropriately." + "help_text": "> **NB** If none provided, will be generated automatically from the Germline Resource file, if provided. Combine with `--save_reference` to save for future runs.\n\nIf you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." }, "known_indels": { "type": "string", @@ -710,7 +699,7 @@ "exists": true, "mimetype": "text/plain", "description": "Path to known indels file.", - "help_text": "If you use AWS iGenomes, this has already been set for you appropriately." + "help_text": "If you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." }, "known_indels_tbi": { "type": "string", @@ -719,12 +708,12 @@ "exists": true, "mimetype": "text/plain", "description": "Path to known indels file index.", - "help_text": "> **NB** If none provided, will be generated automatically from the known index file, if provided. Combine with `--save_reference` to save for future runs.\n\nIf you use AWS iGenomes, this has already been set for you appropriately." + "help_text": "> **NB** If none provided, will be generated automatically from the known index file, if provided. Combine with `--save_reference` to save for future runs.\n\nIf you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." }, "known_indels_vqsr": { "type": "string", "fa_icon": "fas fa-book", - "description": "Label string for VariantRecalibration (haplotypecaller joint variant calling). If you use AWS iGenomes, this has already been set for you appropriately." + "description": "Label string for VariantRecalibration (haplotypecaller joint variant calling). If you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." }, "known_snps": { "type": "string", @@ -733,7 +722,7 @@ "exists": true, "mimetype": "text/plain", "description": "Path to known snps file.", - "help_text": "If you use AWS iGenomes, this has already been set for you appropriately." + "help_text": "If you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." }, "known_snps_tbi": { "type": "string", @@ -742,12 +731,12 @@ "exists": true, "mimetype": "text/plain", "description": "Path to known snps file snps.", - "help_text": "> **NB** If none provided, will be generated automatically from the known index file, if provided. Combine with `--save_reference` to save for future runs.\n\nIf you use AWS iGenomes, this has already been set for you appropriately." + "help_text": "> **NB** If none provided, will be generated automatically from the known index file, if provided. 
Combine with `--save_reference` to save for future runs.\n\nIf you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." }, "known_snps_vqsr": { "type": "string", "fa_icon": "fas fa-book", - "description": "Label string for VariantRecalibration (haplotypecaller joint variant calling).If you use AWS iGenomes, this has already been set for you appropriately." + "description": "Label string for VariantRecalibration (haplotypecaller joint variant calling).If you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." }, "mappability": { "type": "string", @@ -756,7 +745,7 @@ "exists": true, "mimetype": "text/plain", "description": "Path to Control-FREEC mappability file.", - "help_text": "If you use AWS iGenomes, this has already been set for you appropriately." + "help_text": "If you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." }, "ngscheckmate_bed": { "type": "string", @@ -765,7 +754,7 @@ "exists": true, "mimetype": "text/plain", "description": "Path to SNP bed file for sample checking with NGSCheckMate", - "help_text": "If you use AWS iGenomes, this has already been set for you appropriately." + "help_text": "If you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." }, "sentieon_dnascope_model": { "type": "string", @@ -774,7 +763,7 @@ "exists": true, "mimetype": "text/plain", "description": "Machine learning model for Sentieon Dnascope.", - "help_text": " It is recommended to use DNAscope with a machine learning model to perform variant calling with higher accuracy by improving the candidate detection and filtering. Sentieon can provide you with a model trained using a subset of the data from the GiAB truth-set found in https://github.com/genome-in-a-bottle. In addition, Sentieon can assist you in the creation of models using your own data, which will calibrate the specifics of your sequencing and bio-informatics processing.\n\nIf you use AWS iGenomes, this has already been set for you appropriately." + "help_text": " It is recommended to use DNAscope with a machine learning model to perform variant calling with higher accuracy by improving the candidate detection and filtering. Sentieon can provide you with a model trained using a subset of the data from the GiAB truth-set found in https://github.com/genome-in-a-bottle. In addition, Sentieon can assist you in the creation of models using your own data, which will calibrate the specifics of your sequencing and bio-informatics processing.\n\nIf you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." }, "snpeff_cache": { "type": "string", @@ -782,13 +771,13 @@ "fa_icon": "fas fa-cloud-download-alt", "default": "s3://annotation-cache/snpeff_cache/", "description": "Path to snpEff cache.", - "help_text": "Path to snpEff cache which should contain the relevant genome and build directory in the path ${snpeff_species}.${snpeff_version}\n\nIf you use AWS iGenomes, this has already been set for you appropriately." + "help_text": "Path to snpEff cache which should contain the relevant genome and build directory in the path ${snpeff_species}.${snpeff_version}\n\nIf you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." 
}, "snpeff_db": { "type": "string", "fa_icon": "fas fa-database", "description": "snpEff DB version.", - "help_text": "This is used to specify the database to be use to annotate with.\nAlternatively databases' names can be listed with the `snpEff databases`.\n\nIf you use AWS iGenomes, this has already been set for you appropriately." + "help_text": "This is used to specify the database to be use to annotate with.\nAlternatively databases' names can be listed with the `snpEff databases`.\n\nIf you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." }, "vep_cache": { "type": "string", @@ -796,25 +785,25 @@ "fa_icon": "fas fa-cloud-download-alt", "default": "s3://annotation-cache/vep_cache/", "description": "Path to VEP cache.", - "help_text": "Path to VEP cache which should contain the relevant species, genome and build directories at the path ${vep_species}/${vep_genome}_${vep_cache_version}\n\nIf you use AWS iGenomes, this has already been set for you appropriately." + "help_text": "Path to VEP cache which should contain the relevant species, genome and build directories at the path ${vep_species}/${vep_genome}_${vep_cache_version}\n\nIf you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." }, "vep_cache_version": { "type": "string", "fa_icon": "fas fa-tag", "description": "VEP cache version.", - "help_text": "Alternative cache version can be used to specify the correct Ensembl Genomes version number as these differ from the concurrent Ensembl/VEP version numbers.\n\nIf you use AWS iGenomes, this has already been set for you appropriately." + "help_text": "Alternative cache version can be used to specify the correct Ensembl Genomes version number as these differ from the concurrent Ensembl/VEP version numbers.\n\nIf you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." }, "vep_genome": { "type": "string", "fa_icon": "fas fa-microscope", "description": "VEP genome.", - "help_text": "This is used to specify the genome when looking for local cache, or cloud based cache.\n\nIf you use AWS iGenomes, this has already been set for you appropriately." + "help_text": "This is used to specify the genome when looking for local cache, or cloud based cache.\n\nIf you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." }, "vep_species": { "type": "string", "fa_icon": "fas fa-microscope", "description": "VEP species.", - "help_text": "Alternatively species listed in Ensembl Genomes caches can be used.\n\nIf you use AWS iGenomes, this has already been set for you appropriately." + "help_text": "Alternatively species listed in Ensembl Genomes caches can be used.\n\nIf you use AWS iGenomes or nf-core/references, this has already been set for you appropriately." } }, "help_text": "The pipeline config files come bundled with paths to the Illumina iGenomes reference index files.\nThe configuration is set up to use the AWS-iGenomes resource\ncf https://ewels.github.io/AWS-iGenomes/." 
@@ -1016,9 +1005,6 @@
         {
             "$ref": "#/$defs/annotation"
         },
-        {
-            "$ref": "#/$defs/general_reference_genome_options"
-        },
         {
             "$ref": "#/$defs/reference_genome_options"
         },
diff --git a/subworkflows/local/prepare_genome/main.nf b/subworkflows/local/prepare_genome/main.nf
index 772af47b37..29993d4d68 100644
--- a/subworkflows/local/prepare_genome/main.nf
+++ b/subworkflows/local/prepare_genome/main.nf
@@ -8,131 +8,89 @@
 // Condition is based on params.step and params.tools
 // If an extra condition exists, it's specified in comments

-include { BWA_INDEX as BWAMEM1_INDEX } from '../../../modules/nf-core/bwa/index/main'
-include { BWAMEM2_INDEX } from '../../../modules/nf-core/bwamem2/index/main'
-include { DRAGMAP_HASHTABLE } from '../../../modules/nf-core/dragmap/hashtable/main'
-include { GATK4_CREATESEQUENCEDICTIONARY } from '../../../modules/nf-core/gatk4/createsequencedictionary/main'
-include { MSISENSORPRO_SCAN } from '../../../modules/nf-core/msisensorpro/scan/main'
-include { SAMTOOLS_FAIDX } from '../../../modules/nf-core/samtools/faidx/main'
-include { TABIX_TABIX as TABIX_BCFTOOLS_ANNOTATIONS } from '../../../modules/nf-core/tabix/tabix/main'
-include { TABIX_TABIX as TABIX_DBSNP } from '../../../modules/nf-core/tabix/tabix/main'
-include { TABIX_TABIX as TABIX_GERMLINE_RESOURCE } from '../../../modules/nf-core/tabix/tabix/main'
-include { TABIX_TABIX as TABIX_KNOWN_INDELS } from '../../../modules/nf-core/tabix/tabix/main'
-include { TABIX_TABIX as TABIX_KNOWN_SNPS } from '../../../modules/nf-core/tabix/tabix/main'
-include { TABIX_TABIX as TABIX_PON } from '../../../modules/nf-core/tabix/tabix/main'
-include { UNTAR as UNTAR_CHR_DIR } from '../../../modules/nf-core/untar/main'
-include { UNZIP as UNZIP_ALLELES } from '../../../modules/nf-core/unzip/main'
-include { UNZIP as UNZIP_GC } from '../../../modules/nf-core/unzip/main'
-include { UNZIP as UNZIP_LOCI } from '../../../modules/nf-core/unzip/main'
-include { UNZIP as UNZIP_RT } from '../../../modules/nf-core/unzip/main'
+include { UNTAR as UNTAR_CHR_DIR } from '../../../modules/nf-core/untar'
+include { UNZIP as UNZIP_ALLELES } from '../../../modules/nf-core/unzip'
+include { UNZIP as UNZIP_GC      } from '../../../modules/nf-core/unzip'
+include { UNZIP as UNZIP_LOCI    } from '../../../modules/nf-core/unzip'
+include { UNZIP as UNZIP_RT      } from '../../../modules/nf-core/unzip'

 workflow PREPARE_GENOME {
     take:
-    ascat_alleles        // params.ascat_alleles
-    ascat_loci           // params.ascat_loci
-    ascat_loci_gc        // params.ascat_loci_gc
-    ascat_loci_rt        // params.ascat_loci_rt
-    bcftools_annotations // channel: [optional] bcftools annotations file
-    chr_dir              // params.chr_dir
-    dbsnp                // channel: [optional] dbsnp
-    fasta                // channel: [mandatory] fasta
-    germline_resource    // channel: [optional] germline_resource
-    known_indels         // channel: [optional] known_indels
-    known_snps           // channel: [optional] known_snps
-    pon                  // channel: [optional] pon
-
+    ascat_alleles // params.ascat_alleles
+    ascat_loci    // params.ascat_loci
+    ascat_loci_gc // params.ascat_loci_gc
+    ascat_loci_rt // params.ascat_loci_rt
+    chr_dir       // params.chr_dir

     main:
     versions = Channel.empty()

-    BWAMEM1_INDEX(fasta) // If aligner is bwa-mem
-    BWAMEM2_INDEX(fasta) // If aligner is bwa-mem2
-    DRAGMAP_HASHTABLE(fasta) // If aligner is dragmap
-
-    GATK4_CREATESEQUENCEDICTIONARY(fasta)
-    MSISENSORPRO_SCAN(fasta)
-    SAMTOOLS_FAIDX(fasta, [ [ id:'no_fai' ], [] ] )
-
-    // the following are flattened and mapped in case the user supplies more than one value for the param
-    // written for KNOWN_INDELS, but preemptively applied to the rest
-    // [ file1, file2 ] becomes [ [ meta1, file1 ], [ meta2, file2 ] ]
-    // outputs are collected to maintain a single channel for relevant TBI files
-    TABIX_BCFTOOLS_ANNOTATIONS(bcftools_annotations.flatten().map{ it -> [ [ id:it.baseName ], it ] })
-    TABIX_DBSNP(dbsnp.flatten().map{ it -> [ [ id:it.baseName ], it ] })
-    TABIX_GERMLINE_RESOURCE(germline_resource.flatten().map{ it -> [ [ id:it.baseName ], it ] })
-    TABIX_KNOWN_SNPS(known_snps.flatten().map{ it -> [ [ id:it.baseName ], it ] } )
-    TABIX_KNOWN_INDELS(known_indels.flatten().map{ it -> [ [ id:it.baseName ], it ] } )
-    TABIX_PON(pon.flatten().map{ it -> [ [ id:it.baseName ], it ] })
-
     // prepare ascat and controlfreec reference files
-    if (!ascat_alleles) allele_files = Channel.empty()
+    if (!ascat_alleles) {
+        allele_files = Channel.empty()
+    }
     else if (ascat_alleles.endsWith(".zip")) {
-        UNZIP_ALLELES(Channel.fromPath(file(ascat_alleles)).collect().map{ it -> [ [ id:it[0].baseName ], it ] })
-        allele_files = UNZIP_ALLELES.out.unzipped_archive.map{ it[1] }
+        UNZIP_ALLELES(Channel.fromPath(file(ascat_alleles)).collect().map { it -> [[id: it[0].baseName], it] })
+        allele_files = UNZIP_ALLELES.out.unzipped_archive.map { it[1] }
         versions = versions.mix(UNZIP_ALLELES.out.versions)
-    } else allele_files = Channel.fromPath(ascat_alleles).collect()
-
-    if (!ascat_loci) loci_files = Channel.empty()
+    }
+    else {
+        allele_files = Channel.fromPath(ascat_alleles).collect()
+    }
+
+    if (!ascat_loci) {
+        loci_files = Channel.empty()
+    }
     else if (ascat_loci.endsWith(".zip")) {
-        UNZIP_LOCI(Channel.fromPath(file(ascat_loci)).collect().map{ it -> [ [ id:it[0].baseName ], it ] })
-        loci_files = UNZIP_LOCI.out.unzipped_archive.map{ it[1] }
+        UNZIP_LOCI(Channel.fromPath(file(ascat_loci)).collect().map { it -> [[id: it[0].baseName], it] })
+        loci_files = UNZIP_LOCI.out.unzipped_archive.map { it[1] }
         versions = versions.mix(UNZIP_LOCI.out.versions)
-    } else loci_files = Channel.fromPath(ascat_loci).collect()
-
-    if (!ascat_loci_gc) gc_file = Channel.value([])
+    }
+    else {
+        loci_files = Channel.fromPath(ascat_loci).collect()
+    }
+
+    if (!ascat_loci_gc) {
+        gc_file = Channel.value([])
+    }
     else if (ascat_loci_gc.endsWith(".zip")) {
-        UNZIP_GC(Channel.fromPath(file(ascat_loci_gc)).collect().map{ it -> [ [ id:it[0].baseName ], it ] })
-        gc_file = UNZIP_GC.out.unzipped_archive.map{ it[1] }
+        UNZIP_GC(Channel.fromPath(file(ascat_loci_gc)).collect().map { it -> [[id: it[0].baseName], it] })
+        gc_file = UNZIP_GC.out.unzipped_archive.map { it[1] }
         versions = versions.mix(UNZIP_GC.out.versions)
-    } else gc_file = Channel.fromPath(ascat_loci_gc).collect()
-
-    if (!ascat_loci_rt) rt_file = Channel.value([])
+    }
+    else {
+        gc_file = Channel.fromPath(ascat_loci_gc).collect()
+    }
+
+    if (!ascat_loci_rt) {
+        rt_file = Channel.value([])
+    }
     else if (ascat_loci_rt.endsWith(".zip")) {
-        UNZIP_RT(Channel.fromPath(file(ascat_loci_rt)).collect().map{ it -> [ [ id:it[0].baseName ], it ] })
-        rt_file = UNZIP_RT.out.unzipped_archive.map{ it[1] }
+        UNZIP_RT(Channel.fromPath(file(ascat_loci_rt)).collect().map { it -> [[id: it[0].baseName], it] })
+        rt_file = UNZIP_RT.out.unzipped_archive.map { it[1] }
         versions = versions.mix(UNZIP_RT.out.versions)
-    } else rt_file = Channel.fromPath(ascat_loci_rt).collect()
-
-    if (!chr_dir) chr_files = Channel.value([])
+    }
+    else {
+        rt_file = Channel.fromPath(ascat_loci_rt).collect()
+    }
+
+    if (!chr_dir) {
+        chr_files = Channel.value([])
+    }
     else if (chr_dir.endsWith(".tar.gz")) {
-        UNTAR_CHR_DIR(Channel.fromPath(file(chr_dir)).collect().map{ it -> [ [ id:it[0].baseName ], it ] })
-        chr_files = UNTAR_CHR_DIR.out.untar.map{ it[1] }
+        UNTAR_CHR_DIR(Channel.fromPath(file(chr_dir)).collect().map { it -> [[id: it[0].baseName], it] })
+        chr_files = UNTAR_CHR_DIR.out.untar.map { it[1] }
         versions = versions.mix(UNTAR_CHR_DIR.out.versions)
-    } else chr_files = Channel.fromPath(chr_dir).collect()
-
-    // Gather versions of all tools used
-    versions = versions.mix(BWAMEM1_INDEX.out.versions)
-    versions = versions.mix(BWAMEM2_INDEX.out.versions)
-    versions = versions.mix(DRAGMAP_HASHTABLE.out.versions)
-    versions = versions.mix(GATK4_CREATESEQUENCEDICTIONARY.out.versions)
-    versions = versions.mix(MSISENSORPRO_SCAN.out.versions)
-    versions = versions.mix(SAMTOOLS_FAIDX.out.versions)
-    versions = versions.mix(TABIX_BCFTOOLS_ANNOTATIONS.out.versions)
-    versions = versions.mix(TABIX_DBSNP.out.versions)
-    versions = versions.mix(TABIX_GERMLINE_RESOURCE.out.versions)
-    versions = versions.mix(TABIX_KNOWN_INDELS.out.versions)
-    versions = versions.mix(TABIX_KNOWN_SNPS.out.versions)
-    versions = versions.mix(TABIX_PON.out.versions)
+    }
+    else {
+        chr_files = Channel.fromPath(chr_dir).collect()
+    }

     emit:
-    bcftools_annotations_tbi = TABIX_BCFTOOLS_ANNOTATIONS.out.tbi.map{ meta, tbi -> [tbi] }.collect() // path: bcftools_annotations.vcf.gz.tbi
-    bwa = BWAMEM1_INDEX.out.index.collect() // path: bwa/*
-    bwamem2 = BWAMEM2_INDEX.out.index.collect() // path: bwamem2/*
-    hashtable = DRAGMAP_HASHTABLE.out.hashmap.collect() // path: dragmap/*
-    dbsnp_tbi = TABIX_DBSNP.out.tbi.map{ meta, tbi -> [tbi] }.collect() // path: dbsnb.vcf.gz.tbi
-    dict = GATK4_CREATESEQUENCEDICTIONARY.out.dict.collect() // path: genome.fasta.dict
-    fasta_fai = SAMTOOLS_FAIDX.out.fai.collect() // path: genome.fasta.fai
-    germline_resource_tbi = TABIX_GERMLINE_RESOURCE.out.tbi.map{ meta, tbi -> [tbi] }.collect() // path: germline_resource.vcf.gz.tbi
-    known_snps_tbi = TABIX_KNOWN_SNPS.out.tbi.map{ meta, tbi -> [tbi] }.collect() // path: {known_indels*}.vcf.gz.tbi
-    known_indels_tbi = TABIX_KNOWN_INDELS.out.tbi.map{ meta, tbi -> [tbi] }.collect() // path: {known_indels*}.vcf.gz.tbi
-    msisensorpro_scan = MSISENSORPRO_SCAN.out.list.map{ meta, list -> [list] } // path: genome_msi.list
-    pon_tbi = TABIX_PON.out.tbi.map{ meta, tbi -> [tbi] }.collect() // path: pon.vcf.gz.tbi
-
-    allele_files // path: allele_files
-    chr_files // path: chr_files
-    gc_file // path: gc_file
-    loci_files // path: loci_files
-    rt_file // path: rt_file
-
-    versions // channel: [ versions.yml ]
+    allele_files // path: allele_files
+    chr_files    // path: chr_files
+    gc_file      // path: gc_file
+    loci_files   // path: loci_files
+    rt_file      // path: rt_file
+    versions     // channel: [ versions.yml ]
 }
diff --git a/subworkflows/local/utils_nfcore_sarek_pipeline/main.nf b/subworkflows/local/utils_nfcore_sarek_pipeline/main.nf
index cd86d3c3cc..cfc3dd1a5d 100644
--- a/subworkflows/local/utils_nfcore_sarek_pipeline/main.nf
+++ b/subworkflows/local/utils_nfcore_sarek_pipeline/main.nf
@@ -8,19 +8,19 @@
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

-include { SAMPLESHEET_TO_CHANNEL } from '../samplesheet_to_channel'
-include { UTILS_NEXTFLOW_PIPELINE } from '../../nf-core/utils_nextflow_pipeline'
-include { UTILS_NFCORE_PIPELINE } from '../../nf-core/utils_nfcore_pipeline'
-include { UTILS_NFSCHEMA_PLUGIN } from '../../nf-core/utils_nfschema_plugin'
-include { completionEmail } from '../../nf-core/utils_nfcore_pipeline'
-include { completionSummary } from '../../nf-core/utils_nfcore_pipeline'
-include { dashedLine } from '../../nf-core/utils_nfcore_pipeline'
-include { getWorkflowVersion } from '../../nf-core/utils_nfcore_pipeline'
-include { imNotification } from '../../nf-core/utils_nfcore_pipeline'
-include { logColours } from '../../nf-core/utils_nfcore_pipeline'
-include { paramsSummaryMap } from 'plugin/nf-schema'
-include { samplesheetToList } from 'plugin/nf-schema'
-include { workflowCitation } from '../../nf-core/utils_nfcore_pipeline'
+include { SAMPLESHEET_TO_CHANNEL  } from '../samplesheet_to_channel'
+include { UTILS_NEXTFLOW_PIPELINE } from '../../nf-core/utils_nextflow_pipeline'
+include { UTILS_NFCORE_PIPELINE   } from '../../nf-core/utils_nfcore_pipeline'
+include { UTILS_NFSCHEMA_PLUGIN   } from '../../nf-core/utils_nfschema_plugin'
+include { completionEmail         } from '../../nf-core/utils_nfcore_pipeline'
+include { completionSummary       } from '../../nf-core/utils_nfcore_pipeline'
+include { dashedLine              } from '../../nf-core/utils_nfcore_pipeline'
+include { getWorkflowVersion      } from '../../nf-core/utils_nfcore_pipeline'
+include { imNotification          } from '../../nf-core/utils_nfcore_pipeline'
+include { logColours              } from '../../nf-core/utils_nfcore_pipeline'
+include { paramsSummaryMap        } from 'plugin/nf-schema'
+include { samplesheetToList       } from 'plugin/nf-schema'
+include { workflowCitation        } from '../../nf-core/utils_nfcore_pipeline'

 /*
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -29,15 +29,14 @@ include { workflowCitation } from '../../nf-core/utils_nfcore_pipeline'
 */

 workflow PIPELINE_INITIALISATION {
-
     take:
     version           // boolean: Display version and exit
     validate_params   // boolean: Boolean whether to validate parameters against the schema at runtime
-    monochrome_logs   // boolean: Do not use coloured log outputs
     nextflow_cli_args //   array: List of positional nextflow CLI args
     outdir            //  string: The output directory where the results will be saved
     input             //  string: Path to input samplesheet
     references        //  string: Path to references
+    step              //  string: The step to retrieve input from

     main:

@@ -46,20 +45,20 @@ workflow PIPELINE_INITIALISATION {
     //
     // Print version and exit if required and dump pipeline parameters to JSON file
     //
-    UTILS_NEXTFLOW_PIPELINE (
+    UTILS_NEXTFLOW_PIPELINE(
         version,
         true,
         outdir,
-        workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1
+        workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1,
     )

     //
     // Validate parameters and generate parameter summary to stdout
     //
-    UTILS_NFSCHEMA_PLUGIN (
+    UTILS_NFSCHEMA_PLUGIN(
         workflow,
         validate_params,
-        null
+        null,
     )

     //
@@ -67,40 +66,11 @@ workflow PIPELINE_INITIALISATION {
     //
     UTILS_NFCORE_PIPELINE(nextflow_cli_args)

-    //
-    // Custom validation for pipeline parameters
-    //
-    validateInputParameters()
-
-    // Check input path parameters to see if they exist
-    def checkPathParamList = [
-        params.bcftools_annotations,
-        params.bcftools_annotations_tbi,
-        params.bcftools_header_lines,
-        params.dbnsfp,
-        params.dbnsfp_tbi,
-        params.input,
-        params.multiqc_config,
-        params.ngscheckmate_bed,
-        params.references,
-        params.sentieon_dnascope_model,
-        params.spliceai_indel,
-        params.spliceai_indel_tbi,
-    ]
-
-// only check if we are using the tools
-if (params.tools && (params.tools.split(',').contains('snpeff') || params.tools.split(',').contains('merge'))) checkPathParamList.add(params.snpeff_cache)
-if (params.tools && (params.tools.split(',').contains('vep') || params.tools.split(',').contains('merge'))) checkPathParamList.add(params.vep_cache)
-
-    // def retrieveInput(need_input, step, outdir) {
-
-    params.input_restart = retrieveInput((!params.build_only_index && !params.input), params.step, params.outdir)
-
-    ch_from_samplesheet = params.build_only_index ? Channel.empty() : input ?
-        Channel.fromList(samplesheetToList(input, "$projectDir/assets/schema_input.json")) :
-        Channel.fromList(samplesheetToList(params.input_restart, "$projectDir/assets/schema_input.json"))
+    ch_from_samplesheet = input
+        ? Channel.fromList(samplesheetToList(input, "${projectDir}/assets/schema_input.json"))
+        : Channel.fromList(samplesheetToList(retrieveInput(step, outdir), "${projectDir}/assets/schema_input.json"))

-    ch_from_references = Channel.fromList(samplesheetToList(references, "$projectDir/subworkflows/local/yaml_references/schema_references.json"))
+    ch_from_references = Channel.fromList(samplesheetToList(references, "${projectDir}/subworkflows/local/yaml_references/schema_references.json"))

     SAMPLESHEET_TO_CHANNEL(
         ch_from_samplesheet,
@@ -120,11 +90,12 @@ if (params.tools && (params.tools.split(',').contains('vep') || params.tools.
         params.step,
         params.tools,
         params.umi_read_structure,
-        params.wes)
+        params.wes,
+    )

     emit:
     samplesheet = SAMPLESHEET_TO_CHANNEL.out.input_sample
-    references = ch_from_references
+    references  = ch_from_references
     versions
 }

@@ -135,7 +106,6 @@ if (params.tools && (params.tools.split(',').contains('vep') || params.tools.
 */

 workflow PIPELINE_COMPLETION {
-
     take:
     email           // string: email address
     email_on_fail   // string: email address sent on pipeline failure
@@ -162,7 +132,7 @@ workflow PIPELINE_COMPLETION {
             plaintext_email,
             outdir,
             monochrome_logs,
-            multiqc_report_list.getVal()
+            multiqc_report_list.getVal(),
         )
     }

@@ -173,7 +143,7 @@ workflow PIPELINE_COMPLETION {
     }

     workflow.onError {
-        log.error "Pipeline failed. Please refer to troubleshooting docs: https://nf-co.re/docs/usage/troubleshooting"
+        log.error("Pipeline failed. Please refer to troubleshooting docs: https://nf-co.re/docs/usage/troubleshooting")
     }
 }

@@ -182,42 +152,6 @@ workflow PIPELINE_COMPLETION {
     FUNCTIONS
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
-//
-// Check and validate pipeline parameters
-//
-def validateInputParameters() {
-    genomeExistsError()
-}
-
-//
-// Validate channels from input samplesheet
-//
-def validateInputSamplesheet(input) {
-    def (metas, fastqs) = input[1..2]
-
-    // Check that multiple runs of the same sample are of the same datatype i.e. single-end / paired-end
-    def endedness_ok = metas.collect{ meta -> meta.single_end }.unique().size == 1
-    if (!endedness_ok) {
-        error("Please check input samplesheet -> Multiple runs of a sample must be of the same datatype i.e. single-end or paired-end: ${metas[0].id}")
-    }
-
-    return [ metas[0], fastqs ]
-}
-
-//
-// Exit pipeline if incorrect --genome key provided
-//
-def genomeExistsError() {
-    // TODO: refactor this to use the new genome config
-    if (params.genomes && params.genome && !params.genomes.containsKey(params.genome)) {
-        def error_string = "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n" +
-            "  Genome '${params.genome}' not found in any config files provided to the pipeline.\n" +
-            "  Currently, the available genome keys are:\n" +
-            "  ${params.genomes.keySet().join(", ")}\n" +
-            "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
-        error(error_string)
-    }
-}

 //
 // Generate methods description for MultiQC
@@ -227,11 +161,11 @@ def toolCitationText() {
     // Can use ternary operators to dynamically construct based conditions, e.g. params["run_xyz"] ? "Tool (Foo et al. 2023)" : "",
     // Uncomment function in methodsDescriptionText to render in MultiQC report
     def citation_text = [
-        "Tools used in the workflow included:",
-        "FastQC (Andrews 2010),",
-        "MultiQC (Ewels et al. 2016)",
-        "."
-    ].join(' ').trim()
+        "Tools used in the workflow included:",
+        "FastQC (Andrews 2010),",
+        "MultiQC (Ewels et al. 2016)",
+        ".",
+    ].join(' ').trim()

     return citation_text
 }

@@ -241,9 +175,9 @@ def toolBibliographyText() {
     // Can use ternary operators to dynamically construct based conditions, e.g. params["run_xyz"] ? "