diff --git a/modules/ensemblvep/Dockerfile b/modules/ensemblvep/Dockerfile index ac1b4691..b4a1c664 100644 --- a/modules/ensemblvep/Dockerfile +++ b/modules/ensemblvep/Dockerfile @@ -8,13 +8,14 @@ LABEL \ COPY environment.yml / RUN conda env create -f /environment.yml && conda clean -a -# Add conda installation dir to PATH (instead of doing 'conda activate') -ENV PATH /opt/conda/envs/nf-core-vep-104.3/bin:$PATH - # Setup default ARG variables ARG GENOME=GRCh38 ARG SPECIES=homo_sapiens -ARG VEP_VERSION=99 +ARG VEP_VERSION=104 +ARG VEP_TAG=104.3 + +# Add conda installation dir to PATH (instead of doing 'conda activate') +ENV PATH /opt/conda/envs/nf-core-vep-${VEP_TAG}/bin:$PATH # Download Genome RUN vep_install \ @@ -27,4 +28,4 @@ RUN vep_install \ --NO_BIOPERL --NO_HTSLIB --NO_TEST --NO_UPDATE # Dump the details of the installed packages to a file for posterity -RUN conda env export --name nf-core-vep-104.3 > nf-core-vep-104.3.yml +RUN conda env export --name nf-core-vep-${VEP_TAG} > nf-core-vep-${VEP_TAG}.yml diff --git a/modules/ensemblvep/build.sh b/modules/ensemblvep/build.sh index 5fcb91df..650c8704 100755 --- a/modules/ensemblvep/build.sh +++ b/modules/ensemblvep/build.sh @@ -10,11 +10,12 @@ build_push() { VEP_TAG=$4 docker build \ + . \ -t nfcore/vep:${VEP_TAG}.${GENOME} \ - software/vep/. \ --build-arg GENOME=${GENOME} \ --build-arg SPECIES=${SPECIES} \ - --build-arg VEP_VERSION=${VEP_VERSION} + --build-arg VEP_VERSION=${VEP_VERSION} \ + --build-arg VEP_TAG=${VEP_TAG} docker push nfcore/vep:${VEP_TAG}.${GENOME} } diff --git a/modules/ensemblvep/main.nf b/modules/ensemblvep/main.nf index c2bd055f..a5a9b1ab 100644 --- a/modules/ensemblvep/main.nf +++ b/modules/ensemblvep/main.nf @@ -13,6 +13,7 @@ process ENSEMBLVEP { val species val cache_version path cache + path extra_files output: tuple val(meta), path("*.ann.vcf"), emit: vcf diff --git a/modules/ensemblvep/meta.yml b/modules/ensemblvep/meta.yml index cd9c8905..418bb970 100644 --- a/modules/ensemblvep/meta.yml +++ b/modules/ensemblvep/meta.yml @@ -10,17 +10,6 @@ tools: homepage: https://www.ensembl.org/info/docs/tools/vep/index.html documentation: https://www.ensembl.org/info/docs/tools/vep/script/index.html licence: ["Apache-2.0"] -params: - - use_cache: - type: boolean - description: | - Enable the usage of containers with cache - Does not work with conda - - vep_tag: - type: value - description: | - Specify the tag for the container - https://hub.docker.com/r/nfcore/vep/tags input: - meta: type: map @@ -47,6 +36,10 @@ input: type: file description: | path to VEP cache (optional) + - extra_files: + type: tuple + description: | + path to file(s) needed for plugins (optional) output: - vcf: type: file diff --git a/modules/happy/happy/main.nf b/modules/happy/happy/main.nf new file mode 100644 index 00000000..1bb99117 --- /dev/null +++ b/modules/happy/happy/main.nf @@ -0,0 +1,42 @@ +def VERSION = '0.3.14' + +process HAPPY_HAPPY { + tag "$meta.id" + label 'process_medium' + + conda (params.enable_conda ? "bioconda::hap.py=0.3.14" : null) + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
+ 'https://depot.galaxyproject.org/singularity/hap.py:0.3.14--py27h5c5a3ab_0': + 'quay.io/biocontainers/hap.py:0.3.14--py27h5c5a3ab_0' }" + + input: + tuple val(meta), path(truth_vcf), path(query_vcf), path(bed) + tuple path(fasta), path(fasta_fai) + + output: + tuple val(meta), path('*.csv'), path('*.json') , emit: metrics + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + def prefix = task.ext.prefix ?: "${meta.id}" + + """ + hap.py \\ + $truth_vcf \\ + $query_vcf \\ + $args \\ + --reference $fasta \\ + --threads $task.cpus \\ + -R $bed \\ + -o $prefix + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + hap.py: $VERSION + END_VERSIONS + """ +} diff --git a/modules/happy/happy/meta.yml b/modules/happy/happy/meta.yml new file mode 100644 index 00000000..8ec762d5 --- /dev/null +++ b/modules/happy/happy/meta.yml @@ -0,0 +1,67 @@ +name: "happy_happy" +description: Hap.py is a tool to compare diploid genotypes at haplotype level. Rather than comparing VCF records row by row, hap.py will generate and match alternate sequences in a superlocus. A superlocus is a small region of the genome (sized between 1 and around 1000 bp) that contains one or more variants. +keywords: + - happy + - benchmark + - haplotype +tools: + - "happy": + description: "Haplotype VCF comparison tools" + homepage: "https://www.illumina.com/products/by-type/informatics-products/basespace-sequence-hub/apps/hap-py-benchmarking.html" + documentation: "https://github.com/Illumina/hap.py" + tool_dev_url: "https://github.com/Illumina/hap.py" + doi: "" + licence: "['BSD-2-clause']" + +input: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - truth_vcf: + type: file + description: gold standard VCF file + pattern: "*.{vcf,vcf.gz}" + - query_vcf: + type: file + description: VCF/GVCF file to query + pattern: "*.{vcf,vcf.gz}" + - bed: + type: file + description: BED file + pattern: "*.bed" + - fasta: + type: file + description: FASTA file of the reference genome + pattern: "*.{fa,fasta}" + - fasta_fai: + type: file + description: The index of the reference FASTA + pattern: "*.fai" + +output: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - summary: + type: file + description: A CSV file containing the summary of the benchmarking + pattern: "*.summary.csv" + - extended: + type: file + description: A CSV file containing extended info of the benchmarking + pattern: "*.extended.csv" + - runinfo: + type: file + description: A JSON file containing the run info + pattern: "*.runinfo.json" + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" + +authors: + - "@nvnieuwk" diff --git a/modules/happy/prepy/main.nf b/modules/happy/prepy/main.nf new file mode 100644 index 00000000..936f56ea --- /dev/null +++ b/modules/happy/prepy/main.nf @@ -0,0 +1,41 @@ +def VERSION = '0.3.14' + +process HAPPY_PREPY { + tag "$meta.id" + label 'process_medium' + + conda (params.enable_conda ? "bioconda::hap.py=0.3.14" : null) + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
+ 'https://depot.galaxyproject.org/singularity/hap.py:0.3.14--py27h5c5a3ab_0': + 'quay.io/biocontainers/hap.py:0.3.14--py27h5c5a3ab_0' }" + + input: + tuple val(meta), path(vcf), path(bed) + tuple path(fasta), path(fasta_fai) + + output: + tuple val(meta), path('*.vcf.gz') , emit: preprocessed_vcf + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + def prefix = task.ext.prefix ?: "${meta.id}" + + """ + pre.py \\ + $args \\ + -R $bed \\ + --reference $fasta \\ + --threads $task.cpus \\ + $vcf \\ + ${prefix}.vcf.gz + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + pre.py: $VERSION + END_VERSIONS + """ +} diff --git a/modules/happy/prepy/meta.yml b/modules/happy/prepy/meta.yml new file mode 100644 index 00000000..7e27a652 --- /dev/null +++ b/modules/happy/prepy/meta.yml @@ -0,0 +1,55 @@ +name: "happy_prepy" +description: Pre.py is a preprocessing tool made to preprocess VCF files for Hap.py +keywords: + - happy + - benchmark + - haplotype +tools: + - "happy": + description: "Haplotype VCF comparison tools" + homepage: "https://www.illumina.com/products/by-type/informatics-products/basespace-sequence-hub/apps/hap-py-benchmarking.html" + documentation: "https://github.com/Illumina/hap.py" + tool_dev_url: "https://github.com/Illumina/hap.py" + doi: "" + licence: "['BSD-2-clause']" + +input: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - vcf: + type: file + description: VCF file to preprocess + pattern: "*.{vcf,vcf.gz}" + - bed: + type: file + description: BED file + pattern: "*.bed" + - fasta: + type: file + description: FASTA file of the reference genome + pattern: "*.{fa,fasta}" + - fasta_fai: + type: file + description: The index of the reference FASTA + pattern: "*.fai" + +output: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - vcf: + type: file + description: A preprocessed VCF file + pattern: "*.vcf.gz" + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" + +authors: + - "@nvnieuwk" diff --git a/modules/krona/kronadb/main.nf b/modules/krona/kronadb/main.nf index afcb0694..eaab43f0 100644 --- a/modules/krona/kronadb/main.nf +++ b/modules/krona/kronadb/main.nf @@ -18,7 +18,9 @@ process KRONA_KRONADB { script: def args = task.ext.args ?: '' """ - ktUpdateTaxonomy.sh taxonomy + ktUpdateTaxonomy.sh \\ + $args \\ + taxonomy/ cat <<-END_VERSIONS > versions.yml "${task.process}": diff --git a/modules/krona/ktimporttaxonomy/main.nf b/modules/krona/ktimporttaxonomy/main.nf index 7837bb87..bc78c6ba 100644 --- a/modules/krona/ktimporttaxonomy/main.nf +++ b/modules/krona/ktimporttaxonomy/main.nf @@ -23,7 +23,10 @@ process KRONA_KTIMPORTTAXONOMY { script: def args = task.ext.args ?: '' """ - ktImportTaxonomy "$report" -tax taxonomy + ktImportTaxonomy \\ + $args \\ + -tax taxonomy/ \\ + "$report" cat <<-END_VERSIONS > versions.yml "${task.process}": diff --git a/modules/krona/ktimporttaxonomy/meta.yml b/modules/krona/ktimporttaxonomy/meta.yml index b65919f8..df0ad1c9 100644 --- a/modules/krona/ktimporttaxonomy/meta.yml +++ b/modules/krona/ktimporttaxonomy/meta.yml @@ -23,8 +23,11 @@ input: Groovy Map containing sample information e.g. 
[ id:'test'] - database: - type: path - description: "Path to the taxonomy database downloaded by krona/kronadb" + type: file + description: | + Path to the taxonomy database .tab file downloaded by krona/ktUpdateTaxonomy + The file will be saved under a folder named "taxonomy" as "taxonomy/taxonomy.tab". + The parent folder will be passed as argument to ktImportTaxonomy. - report: type: file description: "A tab-delimited file with taxonomy IDs and (optionally) query IDs, magnitudes, and scores. Query IDs are taken from column 1, taxonomy IDs from column 2, and scores from column 3. Lines beginning with # will be ignored." diff --git a/modules/krona/ktupdatetaxonomy/main.nf b/modules/krona/ktupdatetaxonomy/main.nf new file mode 100644 index 00000000..8326f219 --- /dev/null +++ b/modules/krona/ktupdatetaxonomy/main.nf @@ -0,0 +1,30 @@ +def VERSION='2.7.1' // Version information not provided by tool on CLI + +process KRONA_KTUPDATETAXONOMY { + label 'process_low' + + conda (params.enable_conda ? "bioconda::krona=2.7.1" : null) + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://depot.galaxyproject.org/singularity/krona:2.7.1--pl526_5' : + 'quay.io/biocontainers/krona:2.7.1--pl526_5' }" + + output: + path 'taxonomy/taxonomy.tab', emit: db + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + """ + ktUpdateTaxonomy.sh \\ + $args \\ + taxonomy/ + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + krona: $VERSION + END_VERSIONS + """ +} diff --git a/modules/krona/ktupdatetaxonomy/meta.yml b/modules/krona/ktupdatetaxonomy/meta.yml new file mode 100644 index 00000000..945b5062 --- /dev/null +++ b/modules/krona/ktupdatetaxonomy/meta.yml @@ -0,0 +1,31 @@ +name: krona_ktupdatetaxonomy +description: KronaTools Update Taxonomy downloads a taxonomy database +keywords: + - database + - taxonomy + - krona + - visualisation +tools: + - krona: + description: Krona Tools is a set of scripts to create Krona charts from several Bioinformatics tools as well as from text and XML files. + homepage: https://github.com/marbl/Krona/wiki/KronaTools + documentation: https://github.com/marbl/Krona/wiki/Installing + tool_dev_url: + doi: https://doi.org/10.1186/1471-2105-12-385 + licence: + +input: + - none: There is no input. This module downloads a pre-built taxonomy database for use with Krona Tools. + +output: + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" + - db: + type: file + description: A TAB separated file that contains a taxonomy database. + pattern: "*.{tab}" + +authors: + - "@mjakobs" diff --git a/modules/motus/downloaddb/main.nf b/modules/motus/downloaddb/main.nf new file mode 100644 index 00000000..317624b5 --- /dev/null +++ b/modules/motus/downloaddb/main.nf @@ -0,0 +1,39 @@ +process MOTUS_DOWNLOADDB { + label 'process_low' + + conda (params.enable_conda ? "bioconda::motus=3.0.1" : null) + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
+ 'https://depot.galaxyproject.org/singularity/motus:3.0.1--pyhdfd78af_0': + 'quay.io/biocontainers/motus:3.0.1--pyhdfd78af_0' }" + + input: + path motus_downloaddb_script + + output: + path "db_mOTU/" , emit: db + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + def software = "${motus_downloaddb_script.simpleName}_copy.py" + """ + ## must copy script file to working directory, + ## otherwise the reference_db will be download to bin folder + ## other than current directory + cp $motus_downloaddb_script ${software} + python ${software} \\ + $args \\ + -t $task.cpus + + ## mOTUs version number is not available from command line. + ## mOTUs save the version number in index database folder. + ## mOTUs will check the database version is same version as exec version. + cat <<-END_VERSIONS > versions.yml + "${task.process}": + mOTUs: \$(grep motus db_mOTU/db_mOTU_versions | sed 's/motus\\t//g') + END_VERSIONS + """ +} diff --git a/modules/motus/downloaddb/meta.yml b/modules/motus/downloaddb/meta.yml new file mode 100644 index 00000000..64df5ee0 --- /dev/null +++ b/modules/motus/downloaddb/meta.yml @@ -0,0 +1,39 @@ +name: "motus_downloaddb" +description: Download the mOTUs database +keywords: + - classify + - metagenomics + - fastq + - taxonomic profiling + - database + - download +tools: + - "motus": + description: "The mOTU profiler is a computational tool that estimates relative taxonomic abundance of known and currently unknown microbial community members using metagenomic shotgun sequencing data." + homepage: "None" + documentation: "https://github.com/motu-tool/mOTUs/wiki" + tool_dev_url: "https://github.com/motu-tool/mOTUs" + doi: "10.1038/s41467-019-08844-4" + licence: "['GPL v3']" + +input: + - motus_downloaddb: + type: directory + description: | + The mOTUs downloadDB script source file. 
+ It is the source file installed or + remote source in github such as https://raw.githubusercontent.com/motu-tool/mOTUs/master/motus/downloadDB.py + pattern: "downloadDB.py" + +output: + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" + - db: + type: directory + description: The mOTUs database directory + pattern: "db_mOTU" + +authors: + - "@jianhong" diff --git a/modules/samtools/bam2fq/main.nf b/modules/samtools/bam2fq/main.nf index 5d6aa79d..9301d1d3 100644 --- a/modules/samtools/bam2fq/main.nf +++ b/modules/samtools/bam2fq/main.nf @@ -45,7 +45,7 @@ process SAMTOOLS_BAM2FQ { bam2fq \\ $args \\ -@ $task.cpus \\ - $inputbam >${prefix}_interleaved.fq.gz + $inputbam | gzip --no-name > ${prefix}_interleaved.fq.gz cat <<-END_VERSIONS > versions.yml "${task.process}": diff --git a/modules/snpeff/Dockerfile b/modules/snpeff/Dockerfile index 608716a4..d0e34757 100644 --- a/modules/snpeff/Dockerfile +++ b/modules/snpeff/Dockerfile @@ -8,15 +8,16 @@ LABEL \ COPY environment.yml / RUN conda env create -f /environment.yml && conda clean -a -# Add conda installation dir to PATH (instead of doing 'conda activate') -ENV PATH /opt/conda/envs/nf-core-snpeff-5.0/bin:$PATH - # Setup default ARG variables ARG GENOME=GRCh38 ARG SNPEFF_CACHE_VERSION=99 +ARG SNPEFF_TAG=99 + +# Add conda installation dir to PATH (instead of doing 'conda activate') +ENV PATH /opt/conda/envs/nf-core-snpeff-${SNPEFF_TAG}/bin:$PATH # Download Genome RUN snpEff download -v ${GENOME}.${SNPEFF_CACHE_VERSION} # Dump the details of the installed packages to a file for posterity -RUN conda env export --name nf-core-snpeff-5.0 > nf-core-snpeff-5.0.yml +RUN conda env export --name nf-core-snpeff-${SNPEFF_TAG} > nf-core-snpeff-${SNPEFF_TAG}.yml diff --git a/modules/snpeff/build.sh b/modules/snpeff/build.sh old mode 100755 new mode 100644 index b94ffd69..2fccf9a8 --- a/modules/snpeff/build.sh +++ b/modules/snpeff/build.sh @@ -9,10 +9,11 @@ build_push() { SNPEFF_TAG=$3 docker build \ + . \ -t nfcore/snpeff:${SNPEFF_TAG}.${GENOME} \ - software/snpeff/. 
\ --build-arg GENOME=${GENOME} \ - --build-arg SNPEFF_CACHE_VERSION=${SNPEFF_CACHE_VERSION} + --build-arg SNPEFF_CACHE_VERSION=${SNPEFF_CACHE_VERSION} \ + --build-arg SNPEFF_TAG=${SNPEFF_TAG} docker push nfcore/snpeff:${SNPEFF_TAG}.${GENOME} } diff --git a/modules/snpeff/meta.yml b/modules/snpeff/meta.yml index c191b9ac..2f0d866e 100644 --- a/modules/snpeff/meta.yml +++ b/modules/snpeff/meta.yml @@ -10,18 +10,6 @@ tools: homepage: https://pcingola.github.io/SnpEff/ documentation: https://pcingola.github.io/SnpEff/se_introduction/ licence: ["MIT"] -params: - - use_cache: - type: boolean - description: | - boolean to enable the usage of containers with cache - Enable the usage of containers with cache - Does not work with conda - - snpeff_tag: - type: value - description: | - Specify the tag for the container - https://hub.docker.com/r/nfcore/snpeff/tags input: - meta: type: map diff --git a/subworkflows/nf-core/annotation/ensemblvep/main.nf b/subworkflows/nf-core/annotation/ensemblvep/main.nf new file mode 100644 index 00000000..5073f38d --- /dev/null +++ b/subworkflows/nf-core/annotation/ensemblvep/main.nf @@ -0,0 +1,31 @@ +// +// Run VEP to annotate VCF files +// + +include { ENSEMBLVEP } from '../../../../modules/ensemblvep/main' +include { TABIX_BGZIPTABIX } from '../../../../modules/tabix/bgziptabix/main' + +workflow ANNOTATION_ENSEMBLVEP { + take: + vcf // channel: [ val(meta), vcf ] + vep_genome // value: genome to use + vep_species // value: species to use + vep_cache_version // value: cache version to use + vep_cache // path: /path/to/vep/cache (optionnal) + vep_extra_files // channel: [ file1, file2...] (optionnal) + + main: + ch_versions = Channel.empty() + + ENSEMBLVEP(vcf, vep_genome, vep_species, vep_cache_version, vep_cache, vep_extra_files) + TABIX_BGZIPTABIX(ENSEMBLVEP.out.vcf) + + // Gather versions of all tools used + ch_versions = ch_versions.mix(ENSEMBLVEP.out.versions.first()) + ch_versions = ch_versions.mix(TABIX_BGZIPTABIX.out.versions.first()) + + emit: + vcf_tbi = TABIX_BGZIPTABIX.out.gz_tbi // channel: [ val(meta), vcf.gz, vcf.gz.tbi ] + reports = ENSEMBLVEP.out.report // path: *.html + versions = ch_versions // path: versions.yml +} diff --git a/subworkflows/nf-core/annotation/ensemblvep/meta.yml b/subworkflows/nf-core/annotation/ensemblvep/meta.yml new file mode 100644 index 00000000..585e003b --- /dev/null +++ b/subworkflows/nf-core/annotation/ensemblvep/meta.yml @@ -0,0 +1,49 @@ +name: annotation_ensemblvep +description: | + Perform annotation with ensemblvep and bgzip + tabix index the resulting VCF file +keywords: + - ensemblvep +modules: + - ensemblvep + - tabix/bgziptabix +input: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. 
[ id:'test', single_end:false ] + - vcf: + type: file + description: | + vcf to annotate + - genome: + type: value + description: | + which genome to annotate with + - species: + type: value + description: | + which species to annotate with + - cache_version: + type: value + description: | + which version of the cache to annotate with + - cache: + type: file + description: | + path to VEP cache (optional) + - extra_files: + type: tuple + description: | + path to file(s) needed for plugins (optional) +output: + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" + - vcf_tbi: + type: file + description: Compressed vcf file + tabix index + pattern: "[ *{.vcf.gz,vcf.gz.tbi} ]" +authors: + - "@maxulysse" diff --git a/subworkflows/nf-core/annotation/snpeff/main.nf b/subworkflows/nf-core/annotation/snpeff/main.nf new file mode 100644 index 00000000..dcf06eb0 --- /dev/null +++ b/subworkflows/nf-core/annotation/snpeff/main.nf @@ -0,0 +1,28 @@ +// +// Run SNPEFF to annotate VCF files +// + +include { SNPEFF } from '../../../../modules/snpeff/main' +include { TABIX_BGZIPTABIX } from '../../../../modules/tabix/bgziptabix/main' + +workflow ANNOTATION_SNPEFF { + take: + vcf // channel: [ val(meta), vcf ] + snpeff_db // value: db version to use + snpeff_cache // path: /path/to/snpeff/cache (optionnal) + + main: + ch_versions = Channel.empty() + + SNPEFF(vcf, snpeff_db, snpeff_cache) + TABIX_BGZIPTABIX(SNPEFF.out.vcf) + + // Gather versions of all tools used + ch_versions = ch_versions.mix(SNPEFF.out.versions.first()) + ch_versions = ch_versions.mix(TABIX_BGZIPTABIX.out.versions.first()) + + emit: + vcf_tbi = TABIX_BGZIPTABIX.out.gz_tbi // channel: [ val(meta), vcf.gz, vcf.gz.tbi ] + reports = SNPEFF.out.report // path: *.html + versions = ch_versions // path: versions.yml +} diff --git a/subworkflows/nf-core/annotation_snpeff/meta.yml b/subworkflows/nf-core/annotation/snpeff/meta.yml similarity index 66% rename from subworkflows/nf-core/annotation_snpeff/meta.yml rename to subworkflows/nf-core/annotation/snpeff/meta.yml index e0773626..241a00cc 100644 --- a/subworkflows/nf-core/annotation_snpeff/meta.yml +++ b/subworkflows/nf-core/annotation/snpeff/meta.yml @@ -11,11 +11,19 @@ input: type: map description: | Groovy Map containing sample information - e.g. [ id:'test' ] - - input: - type: vcf - description: list containing one vcf file - pattern: "[ *.{vcf,vcf.gz} ]" + e.g. 
[ id:'test', single_end:false ] + - vcf: + type: file + description: | + vcf to annotate + - db: + type: value + description: | + which db to annotate with + - cache: + type: file + description: | + path to snpEff cache (optional) output: - versions: type: file diff --git a/subworkflows/nf-core/annotation_ensemblvep/main.nf b/subworkflows/nf-core/annotation_ensemblvep/main.nf deleted file mode 100644 index 3f3ecc6e..00000000 --- a/subworkflows/nf-core/annotation_ensemblvep/main.nf +++ /dev/null @@ -1,26 +0,0 @@ -// -// Run VEP to annotate VCF files -// - -include { ENSEMBLVEP } from '../../../modules/ensemblvep/main' -include { TABIX_BGZIPTABIX as ANNOTATION_BGZIPTABIX } from '../../../modules/tabix/bgziptabix/main' - -workflow ANNOTATION_ENSEMBLVEP { - take: - vcf // channel: [ val(meta), vcf ] - vep_genome // value: which genome - vep_species // value: which species - vep_cache_version // value: which cache version - vep_cache // path: path_to_vep_cache (optionnal) - - main: - ENSEMBLVEP(vcf, vep_genome, vep_species, vep_cache_version, vep_cache) - ANNOTATION_BGZIPTABIX(ENSEMBLVEP.out.vcf) - - ch_versions = ENSEMBLVEP.out.versions.first().mix(ANNOTATION_BGZIPTABIX.out.versions.first()) - - emit: - vcf_tbi = ANNOTATION_BGZIPTABIX.out.gz_tbi // channel: [ val(meta), vcf.gz, vcf.gz.tbi ] - reports = ENSEMBLVEP.out.report // path: *.html - versions = ch_versions // path: versions.yml -} diff --git a/subworkflows/nf-core/annotation_ensemblvep/meta.yml b/subworkflows/nf-core/annotation_ensemblvep/meta.yml deleted file mode 100644 index 991a8b2f..00000000 --- a/subworkflows/nf-core/annotation_ensemblvep/meta.yml +++ /dev/null @@ -1,29 +0,0 @@ -name: annotation_ensemblvep -description: | - Perform annotation with ensemblvep and bgzip + tabix index the resulting VCF file -keywords: - - ensemblvep -modules: - - ensemblvep - - tabix/bgziptabix -input: - - meta: - type: map - description: | - Groovy Map containing sample information - e.g. 
[ id:'test' ] - - input: - type: vcf - description: list containing one vcf file - pattern: "[ *.{vcf,vcf.gz} ]" -output: - - versions: - type: file - description: File containing software versions - pattern: "versions.yml" - - vcf_tbi: - type: file - description: Compressed vcf file + tabix index - pattern: "[ *{.vcf.gz,vcf.gz.tbi} ]" -authors: - - "@maxulysse" diff --git a/subworkflows/nf-core/annotation_snpeff/main.nf b/subworkflows/nf-core/annotation_snpeff/main.nf deleted file mode 100644 index add5f9c8..00000000 --- a/subworkflows/nf-core/annotation_snpeff/main.nf +++ /dev/null @@ -1,23 +0,0 @@ -// -// Run SNPEFF to annotate VCF files -// - -include { SNPEFF } from '../../../modules/snpeff/main' -include { TABIX_BGZIPTABIX as ANNOTATION_BGZIPTABIX } from '../../../modules/tabix/bgziptabix/main' - -workflow ANNOTATION_SNPEFF { - take: - vcf // channel: [ val(meta), vcf ] - snpeff_db // value: version of db to use - snpeff_cache // path: path_to_snpeff_cache (optionnal) - - main: - SNPEFF(vcf, snpeff_db, snpeff_cache) - ANNOTATION_BGZIPTABIX(SNPEFF.out.vcf) - ch_versions = SNPEFF.out.versions.first().mix(ANNOTATION_BGZIPTABIX.out.versions.first()) - - emit: - vcf_tbi = ANNOTATION_BGZIPTABIX.out.gz_tbi // channel: [ val(meta), vcf.gz, vcf.gz.tbi ] - reports = SNPEFF.out.report // path: *.html - versions = ch_versions // path: versions.yml -} diff --git a/tests/config/pytest_modules.yml b/tests/config/pytest_modules.yml index f4feb844..81f98d59 100644 --- a/tests/config/pytest_modules.yml +++ b/tests/config/pytest_modules.yml @@ -891,6 +891,14 @@ hamronization/summarize: - modules/hamronization/summarize/** - tests/modules/hamronization/summarize/** +happy/happy: + - modules/happy/happy/** + - tests/modules/happy/happy/** + +happy/prepy: + - modules/happy/prepy/** + - tests/modules/happy/prepy/** + hicap: - modules/hicap/** - tests/modules/hicap/** @@ -1050,6 +1058,10 @@ krona/kronadb: - modules/krona/kronadb/** - tests/modules/krona/kronadb/** +krona/ktupdatetaxonomy: + - modules/krona/ktupdatetaxonomy/** + - tests/modules/krona/ktupdatetaxonomy/** + krona/ktimporttaxonomy: - modules/krona/ktimporttaxonomy/** - tests/modules/krona/ktimporttaxonomy/** @@ -1242,6 +1254,10 @@ mosdepth: - modules/mosdepth/** - tests/modules/mosdepth/** +motus/downloaddb: + - modules/motus/downloaddb/** + - tests/modules/motus/downloaddb/** + msisensor/msi: - modules/msisensor/msi/** - tests/modules/msisensor/msi/** diff --git a/tests/config/test_data.config b/tests/config/test_data.config index 62e38c4d..e9a5f4ab 100644 --- a/tests/config/test_data.config +++ b/tests/config/test_data.config @@ -111,7 +111,9 @@ params { test_sequencing_summary = "${test_data_dir}/genomics/sarscov2/nanopore/sequencing_summary/test.sequencing_summary.txt" } 'metagenome' { + classified_reads_assignment = "${test_data_dir}/genomics/sarscov2/metagenome/test_1.kraken2.reads.txt" kraken_report = "${test_data_dir}/genomics/sarscov2/metagenome/test_1.kraken2.report.txt" + krona_taxonomy = "${test_data_dir}/genomics/sarscov2/metagenome/krona_taxonomy.tab" } } 'homo_sapiens' { diff --git a/tests/modules/ensemblvep/main.nf b/tests/modules/ensemblvep/main.nf index 223847c7..30d19957 100644 --- a/tests/modules/ensemblvep/main.nf +++ b/tests/modules/ensemblvep/main.nf @@ -10,5 +10,5 @@ workflow test_ensemblvep { file(params.test_data['sarscov2']['illumina']['test_vcf'], checkIfExists: true) ] - ENSEMBLVEP ( input, "WBcel235", "caenorhabditis_elegans", "104", [] ) + ENSEMBLVEP ( input, "WBcel235", "caenorhabditis_elegans", "104", [], [] ) } 
diff --git a/tests/modules/happy/happy/main.nf b/tests/modules/happy/happy/main.nf new file mode 100644 index 00000000..515a657b --- /dev/null +++ b/tests/modules/happy/happy/main.nf @@ -0,0 +1,39 @@ +#!/usr/bin/env nextflow + +nextflow.enable.dsl = 2 + +include { HAPPY_HAPPY } from '../../../../modules/happy/happy/main.nf' + +workflow test_happy_vcf { + + input = [ + [ id:'test' ], // meta map + file(params.test_data['homo_sapiens']['illumina']['test_rnaseq_vcf'], checkIfExists: true), + file(params.test_data['homo_sapiens']['illumina']['test_genome21_indels_vcf_gz'], checkIfExists: true), + file(params.test_data['homo_sapiens']['genome']['genome_bed'], checkIfExists: true) + ] + + fasta = Channel.value([ + file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true), + file(params.test_data['homo_sapiens']['genome']['genome_fasta_fai'], checkIfExists: true) + ]) + + HAPPY_HAPPY ( input, fasta ) +} + +workflow test_happy_gvcf { + + input = [ + [ id:'test' ], // meta map + file(params.test_data['homo_sapiens']['illumina']['test_rnaseq_vcf'], checkIfExists: true), + file(params.test_data['homo_sapiens']['illumina']['test_genome_vcf'], checkIfExists: true), + file(params.test_data['homo_sapiens']['genome']['genome_bed'], checkIfExists: true) + ] + + fasta = Channel.value([ + file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true), + file(params.test_data['homo_sapiens']['genome']['genome_fasta_fai'], checkIfExists: true) + ]) + + HAPPY_HAPPY ( input, fasta ) +} diff --git a/tests/modules/happy/happy/nextflow.config b/tests/modules/happy/happy/nextflow.config new file mode 100644 index 00000000..50f50a7a --- /dev/null +++ b/tests/modules/happy/happy/nextflow.config @@ -0,0 +1,5 @@ +process { + + publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" } + +} \ No newline at end of file diff --git a/tests/modules/happy/happy/test.yml b/tests/modules/happy/happy/test.yml new file mode 100644 index 00000000..89a1f012 --- /dev/null +++ b/tests/modules/happy/happy/test.yml @@ -0,0 +1,27 @@ +- name: happy happy test_happy_vcf + command: nextflow run tests/modules/happy/happy -entry test_happy_vcf -c tests/config/nextflow.config + tags: + - happy + - happy/happy + files: + - path: output/happy/test.extended.csv + md5sum: ef79c7c789ef4f146ca2e50dafaf22b3 + - path: output/happy/test.runinfo.json + - path: output/happy/test.summary.csv + md5sum: f8aa5d36d3c48dede2f607fd565894ad + - path: output/happy/versions.yml + md5sum: 82243bf6dbdc71aa63211ee2a89f47f2 + +- name: happy happy test_happy_gvcf + command: nextflow run tests/modules/happy/happy -entry test_happy_gvcf -c tests/config/nextflow.config + tags: + - happy + - happy/happy + files: + - path: output/happy/test.extended.csv + md5sum: 3d5c21b67a259a3f6dcb088d55b86cd3 + - path: output/happy/test.runinfo.json + - path: output/happy/test.summary.csv + md5sum: 03044e9bb5a0c6f0947b7e910fc8a558 + - path: output/happy/versions.yml + md5sum: 551fa216952d6f5de78e6e453b92aaab diff --git a/tests/modules/happy/prepy/main.nf b/tests/modules/happy/prepy/main.nf new file mode 100644 index 00000000..0b511104 --- /dev/null +++ b/tests/modules/happy/prepy/main.nf @@ -0,0 +1,37 @@ +#!/usr/bin/env nextflow + +nextflow.enable.dsl = 2 + +include { HAPPY_PREPY } from '../../../../modules/happy/prepy/main.nf' + +workflow test_happy_prepy_vcf { + + input = [ + [ id:'test' ], // meta map + file(params.test_data['homo_sapiens']['illumina']['test_genome21_indels_vcf_gz'], 
checkIfExists: true), + file(params.test_data['homo_sapiens']['genome']['genome_bed'], checkIfExists: true) + ] + + fasta = Channel.value([ + file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true), + file(params.test_data['homo_sapiens']['genome']['genome_fasta_fai'], checkIfExists: true) + ]) + + HAPPY_PREPY ( input, fasta ) +} + +workflow test_happy_prepy_gvcf { + + input = [ + [ id:'test' ], // meta map + file(params.test_data['homo_sapiens']['illumina']['test_genome_vcf'], checkIfExists: true), + file(params.test_data['homo_sapiens']['genome']['genome_bed'], checkIfExists: true) + ] + + fasta = Channel.value([ + file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true), + file(params.test_data['homo_sapiens']['genome']['genome_fasta_fai'], checkIfExists: true) + ]) + + HAPPY_PREPY ( input, fasta ) +} diff --git a/tests/modules/happy/prepy/nextflow.config b/tests/modules/happy/prepy/nextflow.config new file mode 100644 index 00000000..50f50a7a --- /dev/null +++ b/tests/modules/happy/prepy/nextflow.config @@ -0,0 +1,5 @@ +process { + + publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" } + +} \ No newline at end of file diff --git a/tests/modules/happy/prepy/test.yml b/tests/modules/happy/prepy/test.yml new file mode 100644 index 00000000..a0a45c38 --- /dev/null +++ b/tests/modules/happy/prepy/test.yml @@ -0,0 +1,19 @@ +- name: happy prepy test_happy_prepy_vcf + command: nextflow run tests/modules/happy/prepy -entry test_happy_prepy_vcf -c tests/config/nextflow.config + tags: + - happy/prepy + - happy + files: + - path: output/happy/test.vcf.gz + - path: output/happy/versions.yml + md5sum: 814d20f1f29f23a3d21012748a5d6393 + +- name: happy prepy test_happy_prepy_gvcf + command: nextflow run tests/modules/happy/prepy -entry test_happy_prepy_gvcf -c tests/config/nextflow.config + tags: + - happy/prepy + - happy + files: + - path: output/happy/test.vcf.gz + - path: output/happy/versions.yml + md5sum: 970a54de46e68ef6d5228a26eaa4c8e7 diff --git a/tests/modules/krona/ktimporttaxonomy/main.nf b/tests/modules/krona/ktimporttaxonomy/main.nf index a23e6fcb..7b39ee82 100644 --- a/tests/modules/krona/ktimporttaxonomy/main.nf +++ b/tests/modules/krona/ktimporttaxonomy/main.nf @@ -2,15 +2,27 @@ nextflow.enable.dsl = 2 -include { KRONA_KTIMPORTTAXONOMY } from '../../../../modules/krona/ktimporttaxonomy/main.nf' +include { KRONA_KTIMPORTTAXONOMY as KRONA_KTIMPORTTAXONOMY_READS } from '../../../../modules/krona/ktimporttaxonomy/main.nf' +include { KRONA_KTIMPORTTAXONOMY as KRONA_KTIMPORTTAXONOMY_REPORT } from '../../../../modules/krona/ktimporttaxonomy/main.nf' -workflow test_krona_ktimporttaxonomy { +workflow test_krona_ktimporttaxonomy_reads { input = [ [ id:'test', single_end:false ], // meta map - file(params.test_data['generic']['txt']['hello'], checkIfExists: true) + file(params.test_data['sarscov2']['metagenome']['classified_reads_assignment'], checkIfExists: true) ] - taxonomy = file(params.test_data['generic']['txt']['hello'], checkIfExists: true) + taxonomy = file(params.test_data['sarscov2']['metagenome']['krona_taxonomy'], checkIfExists: true) - KRONA_KTIMPORTTAXONOMY ( input, taxonomy ) + KRONA_KTIMPORTTAXONOMY_READS ( input, taxonomy ) +} + +workflow test_krona_ktimporttaxonomy_report { + + input = [ + [ id:'test', single_end:false ], // meta map + file(params.test_data['sarscov2']['metagenome']['kraken_report'], checkIfExists: true) + ] + taxonomy = 
file(params.test_data['sarscov2']['metagenome']['krona_taxonomy'], checkIfExists: true) + + KRONA_KTIMPORTTAXONOMY_REPORT ( input, taxonomy ) } diff --git a/tests/modules/krona/ktimporttaxonomy/nextflow.config b/tests/modules/krona/ktimporttaxonomy/nextflow.config index 8730f1c4..fb4d86f5 100644 --- a/tests/modules/krona/ktimporttaxonomy/nextflow.config +++ b/tests/modules/krona/ktimporttaxonomy/nextflow.config @@ -2,4 +2,12 @@ process { publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" } + withName: KRONA_KTIMPORTTAXONOMY_READS { + ext.args = '-t 3' + } + + withName: KRONA_KTIMPORTTAXONOMY_REPORT { + ext.args = '-m 3 -t 5' + } + } diff --git a/tests/modules/krona/ktimporttaxonomy/test.yml b/tests/modules/krona/ktimporttaxonomy/test.yml index b7748980..ffcb5669 100644 --- a/tests/modules/krona/ktimporttaxonomy/test.yml +++ b/tests/modules/krona/ktimporttaxonomy/test.yml @@ -1,9 +1,23 @@ -- name: krona ktimporttaxonomy test_krona_ktimporttaxonomy - command: nextflow run ./tests/modules/krona/ktimporttaxonomy -entry test_krona_ktimporttaxonomy -c ./tests/config/nextflow.config -c ./tests/modules/krona/ktimporttaxonomy/nextflow.config +- name: krona ktimporttaxonomy test_krona_ktimporttaxonomy_reads + command: nextflow run tests/modules/krona/ktimporttaxonomy -entry test_krona_ktimporttaxonomy_reads -c tests/config/nextflow.config tags: - - krona/ktimporttaxonomy - krona + - krona/ktimporttaxonomy files: - path: output/krona/taxonomy.krona.html contains: - "DOCTYPE html PUBLIC" + - path: output/krona/versions.yml + md5sum: 660a8c151191bf4c63bd96db2c7fe503 + +- name: krona ktimporttaxonomy test_krona_ktimporttaxonomy_report + command: nextflow run tests/modules/krona/ktimporttaxonomy -entry test_krona_ktimporttaxonomy_report -c tests/config/nextflow.config + tags: + - krona + - krona/ktimporttaxonomy + files: + - path: output/krona/taxonomy.krona.html + contains: + - "DOCTYPE html PUBLIC" + - path: output/krona/versions.yml + md5sum: 8a593c16bb2d4132638fb0fc342fe2b7 diff --git a/tests/modules/krona/ktupdatetaxonomy/main.nf b/tests/modules/krona/ktupdatetaxonomy/main.nf new file mode 100644 index 00000000..d2d18f31 --- /dev/null +++ b/tests/modules/krona/ktupdatetaxonomy/main.nf @@ -0,0 +1,9 @@ +#!/usr/bin/env nextflow + +nextflow.enable.dsl = 2 + +include { KRONA_KTUPDATETAXONOMY } from '../../../../modules/krona/ktupdatetaxonomy/main.nf' + +workflow test_krona_ktupdatetaxonomy { + KRONA_KTUPDATETAXONOMY ( ) +} diff --git a/tests/modules/krona/ktupdatetaxonomy/nextflow.config b/tests/modules/krona/ktupdatetaxonomy/nextflow.config new file mode 100644 index 00000000..8730f1c4 --- /dev/null +++ b/tests/modules/krona/ktupdatetaxonomy/nextflow.config @@ -0,0 +1,5 @@ +process { + + publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" } + +} diff --git a/tests/modules/krona/ktupdatetaxonomy/test.yml b/tests/modules/krona/ktupdatetaxonomy/test.yml new file mode 100644 index 00000000..b50fe8f2 --- /dev/null +++ b/tests/modules/krona/ktupdatetaxonomy/test.yml @@ -0,0 +1,7 @@ +- name: krona ktupdatetaxonomy test_krona_ktupdatetaxonomy + command: nextflow run ./tests/modules/krona/ktupdatetaxonomy -entry test_krona_ktupdatetaxonomy -c ./tests/config/nextflow.config -c ./tests/modules/krona/ktupdatetaxonomy/nextflow.config + tags: + - krona + - krona/ktupdatetaxonomy + files: + - path: output/krona/taxonomy/taxonomy.tab diff --git a/tests/modules/motus/downloaddb/main.nf b/tests/modules/motus/downloaddb/main.nf 
new file mode 100644 index 00000000..f7680ca9 --- /dev/null +++ b/tests/modules/motus/downloaddb/main.nf @@ -0,0 +1,12 @@ +#!/usr/bin/env nextflow + +nextflow.enable.dsl = 2 + +include { MOTUS_DOWNLOADDB } from '../../../../modules/motus/downloaddb/main.nf' + +workflow test_motus_downloaddb { + + input = file('https://raw.githubusercontent.com/motu-tool/mOTUs/master/motus/downloadDB.py') + + MOTUS_DOWNLOADDB ( input ) +} diff --git a/tests/modules/motus/downloaddb/nextflow.config b/tests/modules/motus/downloaddb/nextflow.config new file mode 100644 index 00000000..50f50a7a --- /dev/null +++ b/tests/modules/motus/downloaddb/nextflow.config @@ -0,0 +1,5 @@ +process { + + publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" } + +} \ No newline at end of file diff --git a/tests/modules/motus/downloaddb/test.yml b/tests/modules/motus/downloaddb/test.yml new file mode 100644 index 00000000..e61f937f --- /dev/null +++ b/tests/modules/motus/downloaddb/test.yml @@ -0,0 +1,7 @@ +- name: motus downloaddb test_motus_downloaddb + command: nextflow run tests/modules/motus/downloaddb -entry test_motus_downloaddb -c tests/config/nextflow.config + tags: + - motus + - motus/downloaddb + files: + - path: output/motus/db_mOTU/db_mOTU_versions diff --git a/tests/modules/samtools/bam2fq/test.yml b/tests/modules/samtools/bam2fq/test.yml index 213c7a2d..4f9472e5 100644 --- a/tests/modules/samtools/bam2fq/test.yml +++ b/tests/modules/samtools/bam2fq/test.yml @@ -1,14 +1,15 @@ - name: samtools bam2fq test_samtools_bam2fq_nosplit - command: nextflow run ./tests/modules/samtools/bam2fq -entry test_samtools_bam2fq_nosplit -c ./tests/config/nextflow.config -c ./tests/modules/samtools/bam2fq/nextflow.config + command: nextflow run tests/modules/samtools/bam2fq -entry test_samtools_bam2fq_nosplit -c tests/config/nextflow.config tags: - samtools/bam2fq - samtools files: - path: output/samtools/test_interleaved.fq.gz - md5sum: d733e66d29a4b366bf9df8c42f845256 + - path: output/samtools/versions.yml + md5sum: 4973eac1b6a8f090d5fcd4456d65a894 - name: samtools bam2fq test_samtools_bam2fq_withsplit - command: nextflow run ./tests/modules/samtools/bam2fq -entry test_samtools_bam2fq_withsplit -c ./tests/config/nextflow.config -c ./tests/modules/samtools/bam2fq/nextflow.config + command: nextflow run tests/modules/samtools/bam2fq -entry test_samtools_bam2fq_withsplit -c tests/config/nextflow.config tags: - samtools/bam2fq - samtools @@ -21,3 +22,5 @@ md5sum: 709872fc2910431b1e8b7074bfe38c67 - path: output/samtools/test_singleton.fq.gz md5sum: 709872fc2910431b1e8b7074bfe38c67 + - path: output/samtools/versions.yml + md5sum: e92d21bbcda2fed7cb438d95c51edff0 diff --git a/tests/subworkflows/nf-core/annotation_ensemblvep/main.nf b/tests/subworkflows/nf-core/annotation/ensemblvep/main.nf similarity index 69% rename from tests/subworkflows/nf-core/annotation_ensemblvep/main.nf rename to tests/subworkflows/nf-core/annotation/ensemblvep/main.nf index 0f00c62e..2c599671 100644 --- a/tests/subworkflows/nf-core/annotation_ensemblvep/main.nf +++ b/tests/subworkflows/nf-core/annotation/ensemblvep/main.nf @@ -2,7 +2,7 @@ nextflow.enable.dsl = 2 -include { ANNOTATION_ENSEMBLVEP } from '../../../../subworkflows/nf-core/annotation_ensemblvep/main' +include { ANNOTATION_ENSEMBLVEP } from '../../../../../subworkflows/nf-core/annotation/ensemblvep/main' workflow annotation_ensemblvep { input = [ @@ -10,5 +10,5 @@ workflow annotation_ensemblvep { file(params.test_data['sarscov2']['illumina']['test_vcf'], 
checkIfExists: true) ] - ANNOTATION_ENSEMBLVEP ( input, "WBcel235", "caenorhabditis_elegans", "104", [] ) + ANNOTATION_ENSEMBLVEP ( input, "WBcel235", "caenorhabditis_elegans", "104", [], [] ) } diff --git a/tests/subworkflows/nf-core/annotation_ensemblvep/nextflow.config b/tests/subworkflows/nf-core/annotation/ensemblvep/nextflow.config similarity index 88% rename from tests/subworkflows/nf-core/annotation_ensemblvep/nextflow.config rename to tests/subworkflows/nf-core/annotation/ensemblvep/nextflow.config index 4e8d2990..806adb58 100644 --- a/tests/subworkflows/nf-core/annotation_ensemblvep/nextflow.config +++ b/tests/subworkflows/nf-core/annotation/ensemblvep/nextflow.config @@ -7,7 +7,7 @@ process { publishDir = [ enabled: false ] } - withName: ANNOTATION_BGZIPTABIX { + withName: TABIX_BGZIPTABIX { ext.prefix = { "${meta.id}_VEP.ann.vcf" } } diff --git a/tests/subworkflows/nf-core/annotation_ensemblvep/test.yml b/tests/subworkflows/nf-core/annotation/ensemblvep/test.yml similarity index 100% rename from tests/subworkflows/nf-core/annotation_ensemblvep/test.yml rename to tests/subworkflows/nf-core/annotation/ensemblvep/test.yml diff --git a/tests/subworkflows/nf-core/annotation_snpeff/main.nf b/tests/subworkflows/nf-core/annotation/snpeff/main.nf similarity index 74% rename from tests/subworkflows/nf-core/annotation_snpeff/main.nf rename to tests/subworkflows/nf-core/annotation/snpeff/main.nf index c80197ee..4aee20ee 100644 --- a/tests/subworkflows/nf-core/annotation_snpeff/main.nf +++ b/tests/subworkflows/nf-core/annotation/snpeff/main.nf @@ -2,7 +2,7 @@ nextflow.enable.dsl = 2 -include { ANNOTATION_SNPEFF } from '../../../../subworkflows/nf-core/annotation_snpeff/main' +include { ANNOTATION_SNPEFF } from '../../../../../subworkflows/nf-core/annotation_snpeff/main' workflow annotation_snpeff { input = [ diff --git a/tests/subworkflows/nf-core/annotation_snpeff/nextflow.config b/tests/subworkflows/nf-core/annotation/snpeff/nextflow.config similarity index 88% rename from tests/subworkflows/nf-core/annotation_snpeff/nextflow.config rename to tests/subworkflows/nf-core/annotation/snpeff/nextflow.config index be76cb4a..31fd635b 100644 --- a/tests/subworkflows/nf-core/annotation_snpeff/nextflow.config +++ b/tests/subworkflows/nf-core/annotation/snpeff/nextflow.config @@ -7,7 +7,7 @@ process { publishDir = [ enabled: false ] } - withName: ANNOTATION_BGZIPTABIX { + withName: TABIX_BGZIPTABIX { ext.prefix = { "${meta.id}_snpEff.ann.vcf" } } diff --git a/tests/subworkflows/nf-core/annotation_snpeff/test.yml b/tests/subworkflows/nf-core/annotation/snpeff/test.yml similarity index 100% rename from tests/subworkflows/nf-core/annotation_snpeff/test.yml rename to tests/subworkflows/nf-core/annotation/snpeff/test.yml
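
As a usage illustration (not part of the patch above): the headline interface change in this diff is that `ENSEMBLVEP` and the relocated `ANNOTATION_ENSEMBLVEP` subworkflow now take an extra `extra_files` input for VEP plugin files, passed as `[]` when unused. Below is a minimal, hedged sketch of how a downstream pipeline might wire the subworkflow up after this change. The calling workflow name, the `params.*` names, the include path prefix, and the CADD plugin files are assumptions invented for illustration; only the argument order and the `vcf_tbi`/`reports`/`versions` outputs come from the subworkflow definition in this diff.

```nextflow
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2

// Include path assumes the subworkflow's new location introduced by this diff;
// the relative prefix depends on where the calling script lives.
include { ANNOTATION_ENSEMBLVEP } from './subworkflows/nf-core/annotation/ensemblvep/main'

workflow ANNOTATE_VCF {
    take:
    vcf // channel: [ val(meta), vcf ]

    main:
    // Hypothetical plugin files (e.g. for the CADD plugin); pass [] when no
    // plugin files are needed -- this is the new sixth input added in this diff.
    vep_extra_files = params.vep_extra_files ? Channel.fromPath(params.vep_extra_files).collect() : []

    ANNOTATION_ENSEMBLVEP(
        vcf,
        params.vep_genome,        // e.g. "GRCh38"
        params.vep_species,       // e.g. "homo_sapiens"
        params.vep_cache_version, // e.g. "104"
        params.vep_cache ?: [],   // optional local cache directory
        vep_extra_files
    )

    emit:
    vcf_tbi  = ANNOTATION_ENSEMBLVEP.out.vcf_tbi  // [ val(meta), vcf.gz, vcf.gz.tbi ]
    reports  = ANNOTATION_ENSEMBLVEP.out.reports  // *.html
    versions = ANNOTATION_ENSEMBLVEP.out.versions // versions.yml
}
```

This mirrors the test invocation in the patch (`ANNOTATION_ENSEMBLVEP ( input, "WBcel235", "caenorhabditis_elegans", "104", [], [] )`), just with the empty placeholders replaced by pipeline-level parameters.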