mirror of https://github.com/MillironX/nf-core_modules.git
synced 2024-11-13 05:13:09 +00:00

Merge branch 'master' into correcting_stubs

This commit is contained in: commit 8e5fda3b10

84 changed files with 1268 additions and 199 deletions
modules/bowtie2/align/main.nf

@@ -29,6 +29,8 @@ process BOWTIE2_ALIGN {
     def unaligned = save_unaligned ? "--un-gz ${prefix}.unmapped.fastq.gz" : ''
     """
     INDEX=`find -L ./ -name "*.rev.1.bt2" | sed 's/.rev.1.bt2//'`
+    [ -z "\$INDEX" ] && INDEX=`find -L ./ -name "*.rev.1.bt2l" | sed 's/.rev.1.bt2l//'`
+    [ -z "\$INDEX" ] && echo "BT2 index files not found" 1>&2 && exit 1
     bowtie2 \\
         -x \$INDEX \\
         -U $reads \\

@@ -49,6 +51,8 @@ process BOWTIE2_ALIGN {
     def unaligned = save_unaligned ? "--un-conc-gz ${prefix}.unmapped.fastq.gz" : ''
     """
     INDEX=`find -L ./ -name "*.rev.1.bt2" | sed 's/.rev.1.bt2//'`
+    [ -z "\$INDEX" ] && INDEX=`find -L ./ -name "*.rev.1.bt2l" | sed 's/.rev.1.bt2l//'`
+    [ -z "\$INDEX" ] && echo "BT2 index files not found" 1>&2 && exit 1
     bowtie2 \\
         -x \$INDEX \\
         -1 ${reads[0]} \\

modules/cnvpytor/callcnvs/main.nf

@@ -2,43 +2,42 @@ process CNVPYTOR_CALLCNVS {
     tag "$meta.id"
     label 'process_medium'

-    conda (params.enable_conda ? "bioconda::cnvpytor=1.0" : null)
+    conda (params.enable_conda ? "bioconda::cnvpytor=1.2.1" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/cnvpytor:1.0--py39h6a678da_2':
-        'quay.io/biocontainers/cnvpytor:1.0--py39h6a678da_2' }"
+        'https://depot.galaxyproject.org/singularity/cnvpytor:1.2.1--pyhdfd78af_0':
+        'quay.io/biocontainers/cnvpytor:1.2.1--pyhdfd78af_0' }"

     input:
     tuple val(meta), path(pytor)
+    val bin_sizes

     output:
-    tuple val(meta), path("*.tsv"), emit: cnvs
-    path "versions.yml"           , emit: versions
+    tuple val(meta), path("${pytor.baseName}.pytor") , emit: pytor
+    path "versions.yml"                              , emit: versions

     when:
     task.ext.when == null || task.ext.when

     script:
-    def args = task.ext.args ?: '1000'
-    def prefix = task.ext.prefix ?: "${meta.id}"
+    def bins = bin_sizes ?: '1000'
     """
     cnvpytor \\
         -root $pytor \\
-        -call $args > ${prefix}.tsv
+        -call $bin_sizes

     cat <<-END_VERSIONS > versions.yml
     "${task.process}":
-        cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
+        cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/CNVpytor //' ))
     END_VERSIONS
     """

     stub:
     def prefix = task.ext.prefix ?: "${meta.id}"
     """
-    touch ${prefix}.tsv
+    touch ${pytor.baseName}.pytor

     cat <<-END_VERSIONS > versions.yml
     "${task.process}":
-        cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
+        cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/CNVpytor //' ))
     END_VERSIONS
     """
 }

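Note: after this change CNVPYTOR_CALLCNVS no longer writes its own TSV; it records the calls in the .pytor root file and emits that file, with the bin sizes supplied as a value. A minimal wiring sketch, assuming hypothetical channel contents and include path (not part of this commit):

    include { CNVPYTOR_CALLCNVS } from './modules/cnvpytor/callcnvs/main'

    workflow {
        // [ meta, pytor ] pairs, e.g. as produced by the partition step
        ch_pytor = Channel.of( [ [ id:'sample1' ], file('sample1.pytor') ] )

        // space-separated bin sizes; the module falls back to '1000' when unset
        CNVPYTOR_CALLCNVS ( ch_pytor, '1000 10000' )
    }

The TSV/VCF/XLS exports are now produced downstream by the new cnvpytor/view module added later in this commit.
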
modules/cnvpytor/callcnvs/meta.yml

@@ -17,8 +17,11 @@ input:
         e.g. [ id:'test']
   - pytor:
       type: file
-      description: cnvpytor root file
+      description: pytor file containing partitions of read depth histograms using mean-shift method
       pattern: "*.{pytor}"
+  - bin_sizes:
+      type: string
+      description: list of bin sizes separated by space, e.g. "1000 10000" or "1000"

 output:
   - meta:

@@ -26,10 +29,10 @@ output:
       description: |
         Groovy Map containing sample information
         e.g. [ id:'test' ]
-  - cnvs:
+  - pytor:
       type: file
-      description: file containing identified copy number variations
-      pattern: "*.{tsv}"
+      description: pytor files containing cnv calls
+      pattern: "*.{pytor}"
   - versions:
       type: file
       description: File containing software versions

modules/cnvpytor/histogram/main.nf

@@ -2,13 +2,15 @@ process CNVPYTOR_HISTOGRAM {
     tag "$meta.id"
     label 'process_medium'

-    conda (params.enable_conda ? "bioconda::cnvpytor=1.0" : null)
+    conda (params.enable_conda ? "bioconda::cnvpytor=1.2.1" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/cnvpytor:1.0--py39h6a678da_2':
-        'quay.io/biocontainers/cnvpytor:1.0--py39h6a678da_2' }"
+        'https://depot.galaxyproject.org/singularity/cnvpytor:1.2.1--pyhdfd78af_0':
+        'quay.io/biocontainers/cnvpytor:1.2.1--pyhdfd78af_0' }"

     input:
     tuple val(meta), path(pytor)
+    val bin_sizes
+

     output:
     tuple val(meta), path("${pytor.baseName}.pytor") , emit: pytor

@@ -18,15 +20,15 @@ process CNVPYTOR_HISTOGRAM {
     task.ext.when == null || task.ext.when

     script:
-    def args = task.ext.args ?: '1000'
+    def bins = bin_sizes ?: '1000'
     """
     cnvpytor \\
         -root $pytor \\
-        -his $args
+        -his $bins

     cat <<-END_VERSIONS > versions.yml
     "${task.process}":
-        cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
+        cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/CNVpytor //' ))
     END_VERSIONS
     """

@@ -36,7 +38,7 @@ process CNVPYTOR_HISTOGRAM {

     cat <<-END_VERSIONS > versions.yml
     "${task.process}":
-        cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
+        cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/CNVpytor //' ))
     END_VERSIONS
     """
 }

modules/cnvpytor/histogram/meta.yml

@@ -22,6 +22,9 @@ input:
       type: file
       description: pytor file containing read depth data
       pattern: "*.{pytor}"
+  - bin_sizes:
+      type: string
+      description: list of bin sizes separated by space, e.g. "1000 10000" or "1000"

 output:
   - meta:

@@ -40,3 +43,4 @@ output:

 authors:
   - "@sima-r"
+  - "@ramprasadn"

modules/cnvpytor/importreaddepth/main.nf

@@ -2,10 +2,10 @@ process CNVPYTOR_IMPORTREADDEPTH {
     tag "$meta.id"
     label 'process_medium'

-    conda (params.enable_conda ? "bioconda::cnvpytor=1.0" : null)
+    conda (params.enable_conda ? "bioconda::cnvpytor=1.2.1" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/cnvpytor:1.0--py39h6a678da_2':
-        'quay.io/biocontainers/cnvpytor:1.0--py39h6a678da_2' }"
+        'https://depot.galaxyproject.org/singularity/cnvpytor:1.2.1--pyhdfd78af_0':
+        'quay.io/biocontainers/cnvpytor:1.2.1--pyhdfd78af_0' }"

     input:
     tuple val(meta), path(input_file), path(index)

@@ -32,7 +32,7 @@ process CNVPYTOR_IMPORTREADDEPTH {

     cat <<-END_VERSIONS > versions.yml
     "${task.process}":
-        cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
+        cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/CNVpytor //' ))
     END_VERSIONS
     """

@@ -43,7 +43,7 @@ process CNVPYTOR_IMPORTREADDEPTH {

     cat <<-END_VERSIONS > versions.yml
     "${task.process}":
-        cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
+        cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/CNVpytor //' ))
     END_VERSIONS
     """
 }

modules/cnvpytor/importreaddepth/meta.yml

@@ -52,3 +52,4 @@ output:

 authors:
   - "@sima-r"
+  - "@ramprasadn"

modules/cnvpytor/partition/main.nf

@@ -2,13 +2,14 @@ process CNVPYTOR_PARTITION {
     tag "$meta.id"
     label 'process_medium'

-    conda (params.enable_conda ? "bioconda::cnvpytor=1.0" : null)
+    conda (params.enable_conda ? "bioconda::cnvpytor=1.2.1" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/cnvpytor:1.0--py39h6a678da_2':
-        'quay.io/biocontainers/cnvpytor:1.0--py39h6a678da_2' }"
+        'https://depot.galaxyproject.org/singularity/cnvpytor:1.2.1--pyhdfd78af_0':
+        'quay.io/biocontainers/cnvpytor:1.2.1--pyhdfd78af_0' }"

     input:
     tuple val(meta), path(pytor)
+    val bin_sizes

     output:
     tuple val(meta), path("${pytor.baseName}.pytor"), emit: pytor

@@ -18,15 +19,15 @@ process CNVPYTOR_PARTITION {
     task.ext.when == null || task.ext.when

     script:
-    def args = task.ext.args ?: ''
+    def bins = bin_sizes ?: '1000'
     """
     cnvpytor \\
         -root $pytor \\
-        -partition $args
+        -partition $bins

     cat <<-END_VERSIONS > versions.yml
     "${task.process}":
-        cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
+        cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/CNVpytor //' ))
     END_VERSIONS
     """

@@ -36,7 +37,7 @@ process CNVPYTOR_PARTITION {

     cat <<-END_VERSIONS > versions.yml
     "${task.process}":
-        cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
+        cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/CNVpytor //' ))
     END_VERSIONS
     """
 }

modules/cnvpytor/partition/meta.yml

@@ -22,6 +22,9 @@ input:
       type: file
       description: pytor file containing read depth data
       pattern: "*.{pytor}"
+  - bin_sizes:
+      type: string
+      description: list of bin sizes separated by space, e.g. "1000 10000" or "1000"

 output:
   - meta:

@@ -40,3 +43,4 @@ output:

 authors:
   - "@sima-r"
+  - "@ramprasadn"

modules/cnvpytor/view/main.nf (new file, 60 lines)

process CNVPYTOR_VIEW {
    tag "$meta.id"
    label 'process_medium'

    conda (params.enable_conda ? "bioconda::cnvpytor=1.2.1" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/cnvpytor:1.2.1--pyhdfd78af_0':
        'quay.io/biocontainers/cnvpytor:1.2.1--pyhdfd78af_0' }"

    input:
    tuple val(meta), path(pytor_files)
    val bin_sizes
    val output_format

    output:
    tuple val(meta), path("*.vcf"), emit: vcf, optional: true
    tuple val(meta), path("*.tsv"), emit: tsv, optional: true
    tuple val(meta), path("*.xls"), emit: xls, optional: true
    path "versions.yml"           , emit: versions

    when:
    task.ext.when == null || task.ext.when

    script:
    def output_suffix = output_format ?: 'vcf'
    def bins          = bin_sizes ?: '1000'
    def input         = pytor_files.join(" ")
    def prefix        = task.ext.prefix ?: "${meta.id}"
    """

    python3 <<CODE
    import cnvpytor,os
    binsizes = "${bins}".split(" ")
    for binsize in binsizes:
        file_list = "${input}".split(" ")
        app = cnvpytor.Viewer(file_list, params={} )
        outputfile = "{}_{}.{}".format("${prefix}",binsize.strip(),"${output_suffix}")
        app.print_filename = outputfile
        app.bin_size = int(binsize)
        app.print_calls_file()
    CODE

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/CNVpytor //' ))
    END_VERSIONS
    """

    stub:
    def output_suffix = output_format ?: 'vcf'
    def prefix = task.ext.prefix ?: "${meta.id}"
    """
    touch ${prefix}.${output_suffix}

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/CNVpytor //' ))
    END_VERSIONS
    """
}

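The meta.yml that follows notes that calls from several samples can be merged by passing a list of .pytor files as pytor_files. A sketch of that invocation, with hypothetical file names and include path:

    include { CNVPYTOR_VIEW } from './modules/cnvpytor/view/main'

    workflow {
        // one meta map with a list of root files; the Viewer merges their calls
        input = [ [ id:'cohort' ], [ file('sample1.pytor'), file('sample2.pytor') ] ]

        CNVPYTOR_VIEW ( input, '1000 10000', 'vcf' ) // one VCF per bin size
    }
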
modules/cnvpytor/view/meta.yml (new file, 56 lines)

name: cnvpytor_view
description: view function to generate vcfs
keywords:
  - cnv calling
tools:
  - cnvpytor:
      description: calling CNVs using read depth
      homepage: https://github.com/abyzovlab/CNVpytor
      documentation: https://github.com/abyzovlab/CNVpytor
      tool_dev_url: https://github.com/abyzovlab/CNVpytor
      doi: "10.1101/2021.01.27.428472v1"
      licence: ["MIT"]

input:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test' ]
  - pytor_files:
      type: file
      description: pytor file containing cnv calls. To merge calls from multiple samples use a list of files.
      pattern: "*.{pytor}"
  - bin_sizes:
      type: string
      description: list of bin sizes separated by space, e.g. "1000 10000" or "1000"
  - output_format:
      type: string
      description: output format of the cnv calls. Valid entries are "tsv", "vcf", and "xls"

output:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test' ]
  - tsv:
      type: file
      description: tsv file containing cnv calls
      pattern: "*.{tsv}"
  - vcf:
      type: file
      description: vcf file containing cnv calls
      pattern: "*.{vcf}"
  - xls:
      type: file
      description: xls file containing cnv calls
      pattern: "*.{xls}"
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"

authors:
  - "@sima-r"
  - "@ramprasadn"

modules/ensemblvep/Dockerfile

@@ -8,13 +8,14 @@ LABEL \
 COPY environment.yml /
 RUN conda env create -f /environment.yml && conda clean -a

-# Add conda installation dir to PATH (instead of doing 'conda activate')
-ENV PATH /opt/conda/envs/nf-core-vep-104.3/bin:$PATH
-
 # Setup default ARG variables
 ARG GENOME=GRCh38
 ARG SPECIES=homo_sapiens
-ARG VEP_VERSION=99
+ARG VEP_VERSION=104
+ARG VEP_TAG=104.3
+
+# Add conda installation dir to PATH (instead of doing 'conda activate')
+ENV PATH /opt/conda/envs/nf-core-vep-${VEP_TAG}/bin:$PATH

 # Download Genome
 RUN vep_install \

@@ -27,4 +28,4 @@ RUN vep_install \
     --NO_BIOPERL --NO_HTSLIB --NO_TEST --NO_UPDATE

 # Dump the details of the installed packages to a file for posterity
-RUN conda env export --name nf-core-vep-104.3 > nf-core-vep-104.3.yml
+RUN conda env export --name nf-core-vep-${VEP_TAG} > nf-core-vep-${VEP_TAG}.yml

modules/ensemblvep/build.sh

@@ -10,11 +10,12 @@ build_push() {
     VEP_TAG=$4

     docker build \
-        . \
         -t nfcore/vep:${VEP_TAG}.${GENOME} \
+        software/vep/. \
        --build-arg GENOME=${GENOME} \
        --build-arg SPECIES=${SPECIES} \
-        --build-arg VEP_VERSION=${VEP_VERSION}
+        --build-arg VEP_VERSION=${VEP_VERSION} \
+        --build-arg VEP_TAG=${VEP_TAG}

     docker push nfcore/vep:${VEP_TAG}.${GENOME}
 }

modules/ensemblvep/main.nf

@@ -13,6 +13,7 @@ process ENSEMBLVEP {
     val species
     val cache_version
     path cache
+    path extra_files

     output:
     tuple val(meta), path("*.ann.vcf"), emit: vcf

modules/ensemblvep/meta.yml

@@ -10,17 +10,6 @@ tools:
       homepage: https://www.ensembl.org/info/docs/tools/vep/index.html
       documentation: https://www.ensembl.org/info/docs/tools/vep/script/index.html
       licence: ["Apache-2.0"]
-params:
-  - use_cache:
-      type: boolean
-      description: |
-        Enable the usage of containers with cache
-        Does not work with conda
-  - vep_tag:
-      type: value
-      description: |
-        Specify the tag for the container
-        https://hub.docker.com/r/nfcore/vep/tags
 input:
   - meta:
       type: map

@@ -47,6 +36,10 @@ input:
       type: file
       description: |
         path to VEP cache (optional)
+  - extra_files:
+      type: tuple
+      description: |
+        path to file(s) needed for plugins (optional)
 output:
   - vcf:
       type: file

modules/happy/happy/main.nf (new file, 42 lines)

def VERSION = '0.3.14'

process HAPPY_HAPPY {
    tag "$meta.id"
    label 'process_medium'

    conda (params.enable_conda ? "bioconda::hap.py=0.3.14" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/hap.py:0.3.14--py27h5c5a3ab_0':
        'quay.io/biocontainers/hap.py:0.3.14--py27h5c5a3ab_0' }"

    input:
    tuple val(meta), path(truth_vcf), path(query_vcf), path(bed)
    tuple path(fasta), path(fasta_fai)

    output:
    tuple val(meta), path('*.csv'), path('*.json'), emit: metrics
    path "versions.yml"                           , emit: versions

    when:
    task.ext.when == null || task.ext.when

    script:
    def args = task.ext.args ?: ''
    def prefix = task.ext.prefix ?: "${meta.id}"

    """
    hap.py \\
        $truth_vcf \\
        $query_vcf \\
        $args \\
        --reference $fasta \\
        --threads $task.cpus \\
        -R $bed \\
        -o $prefix

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        hap.py: $VERSION
    END_VERSIONS
    """
}

modules/happy/happy/meta.yml (new file, 67 lines)

name: "happy_happy"
description: Hap.py is a tool to compare diploid genotypes at haplotype level. Rather than comparing VCF records row by row, hap.py will generate and match alternate sequences in a superlocus. A superlocus is a small region of the genome (sized between 1 and around 1000 bp) that contains one or more variants.
keywords:
  - happy
  - benchmark
  - haplotype
tools:
  - "happy":
      description: "Haplotype VCF comparison tools"
      homepage: "https://www.illumina.com/products/by-type/informatics-products/basespace-sequence-hub/apps/hap-py-benchmarking.html"
      documentation: "https://github.com/Illumina/hap.py"
      tool_dev_url: "https://github.com/Illumina/hap.py"
      doi: ""
      licence: "['BSD-2-clause']"

input:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - truth_vcf:
      type: file
      description: gold standard VCF file
      pattern: "*.{vcf,vcf.gz}"
  - query_vcf:
      type: file
      description: VCF/GVCF file to query
      pattern: "*.{vcf,vcf.gz}"
  - bed:
      type: file
      description: BED file
      pattern: "*.bed"
  - fasta:
      type: file
      description: FASTA file of the reference genome
      pattern: "*.{fa,fasta}"
  - fasta_fai:
      type: file
      description: The index of the reference FASTA
      pattern: "*.fai"

output:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - summary:
      type: file
      description: A CSV file containing the summary of the benchmarking
      pattern: "*.summary.csv"
  - extended:
      type: file
      description: A CSV file containing extended info of the benchmarking
      pattern: "*.extended.csv"
  - runinfo:
      type: file
      description: A JSON file containing the run info
      pattern: "*.runinfo.json"
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"

authors:
  - "@nvnieuwk"

modules/happy/prepy/main.nf (new file, 41 lines)

def VERSION = '0.3.14'

process HAPPY_PREPY {
    tag "$meta.id"
    label 'process_medium'

    conda (params.enable_conda ? "bioconda::hap.py=0.3.14" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/hap.py:0.3.14--py27h5c5a3ab_0':
        'quay.io/biocontainers/hap.py:0.3.14--py27h5c5a3ab_0' }"

    input:
    tuple val(meta), path(vcf), path(bed)
    tuple path(fasta), path(fasta_fai)

    output:
    tuple val(meta), path('*.vcf.gz'), emit: preprocessed_vcf
    path "versions.yml"              , emit: versions

    when:
    task.ext.when == null || task.ext.when

    script:
    def args = task.ext.args ?: ''
    def prefix = task.ext.prefix ?: "${meta.id}"

    """
    pre.py \\
        $args \\
        -R $bed \\
        --reference $fasta \\
        --threads $task.cpus \\
        $vcf \\
        ${prefix}.vcf.gz

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        pre.py: $VERSION
    END_VERSIONS
    """
}

modules/happy/prepy/meta.yml (new file, 55 lines)

name: "happy_prepy"
description: Pre.py is a tool to preprocess VCF files for Hap.py
keywords:
  - happy
  - benchmark
  - haplotype
tools:
  - "happy":
      description: "Haplotype VCF comparison tools"
      homepage: "https://www.illumina.com/products/by-type/informatics-products/basespace-sequence-hub/apps/hap-py-benchmarking.html"
      documentation: "https://github.com/Illumina/hap.py"
      tool_dev_url: "https://github.com/Illumina/hap.py"
      doi: ""
      licence: "['BSD-2-clause']"

input:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - vcf:
      type: file
      description: VCF file to preprocess
      pattern: "*.{vcf,vcf.gz}"
  - bed:
      type: file
      description: BED file
      pattern: "*.bed"
  - fasta:
      type: file
      description: FASTA file of the reference genome
      pattern: "*.{fa,fasta}"
  - fasta_fai:
      type: file
      description: The index of the reference FASTA
      pattern: "*.fai"

output:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - vcf:
      type: file
      description: A preprocessed VCF file
      pattern: "*.vcf.gz"
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"

authors:
  - "@nvnieuwk"

modules/krona/kronadb/main.nf

@@ -18,7 +18,9 @@ process KRONA_KRONADB {
     script:
     def args = task.ext.args ?: ''
     """
-    ktUpdateTaxonomy.sh taxonomy
+    ktUpdateTaxonomy.sh \\
+        $args \\
+        taxonomy/

     cat <<-END_VERSIONS > versions.yml
     "${task.process}":

modules/krona/ktimporttaxonomy/main.nf

@@ -23,7 +23,10 @@ process KRONA_KTIMPORTTAXONOMY {
     script:
     def args = task.ext.args ?: ''
     """
-    ktImportTaxonomy "$report" -tax taxonomy
+    ktImportTaxonomy \\
+        $args \\
+        -tax taxonomy/ \\
+        "$report"

     cat <<-END_VERSIONS > versions.yml
     "${task.process}":

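Both Krona commands now thread $args through, so extra flags can be injected from a pipeline's modules configuration instead of editing the module. A hedged sketch of such a config (the flag string is left empty; fill in whatever ktImportTaxonomy options a pipeline needs):

    process {
        withName: KRONA_KTIMPORTTAXONOMY {
            // extra ktImportTaxonomy command-line options go here
            ext.args = ''
        }
    }
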
modules/krona/ktimporttaxonomy/meta.yml

@@ -23,8 +23,11 @@ input:
         Groovy Map containing sample information
         e.g. [ id:'test']
   - database:
-      type: path
-      description: "Path to the taxonomy database downloaded by krona/kronadb"
+      type: file
+      description: |
+        Path to the taxonomy database .tab file downloaded by krona/ktUpdateTaxonomy.
+        The file will be saved under a folder named "taxonomy" as "taxonomy/taxonomy.tab".
+        The parent folder will be passed as an argument to ktImportTaxonomy.
   - report:
       type: file
       description: "A tab-delimited file with taxonomy IDs and (optionally) query IDs, magnitudes, and scores. Query IDs are taken from column 1, taxonomy IDs from column 2, and scores from column 3. Lines beginning with # will be ignored."

modules/krona/ktupdatetaxonomy/main.nf (new file, 30 lines)

def VERSION='2.7.1' // Version information not provided by tool on CLI

process KRONA_KTUPDATETAXONOMY {
    label 'process_low'

    conda (params.enable_conda ? "bioconda::krona=2.7.1" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/krona:2.7.1--pl526_5' :
        'quay.io/biocontainers/krona:2.7.1--pl526_5' }"

    output:
    path 'taxonomy/taxonomy.tab', emit: db
    path "versions.yml"         , emit: versions

    when:
    task.ext.when == null || task.ext.when

    script:
    def args = task.ext.args ?: ''
    """
    ktUpdateTaxonomy.sh \\
        $args \\
        taxonomy/

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        krona: $VERSION
    END_VERSIONS
    """
}

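The new module takes no input channels and simply emits the downloaded table. A minimal sketch of wiring it ahead of a Krona import step, assuming a hypothetical include path:

    include { KRONA_KTUPDATETAXONOMY } from './modules/krona/ktupdatetaxonomy/main'

    workflow {
        KRONA_KTUPDATETAXONOMY ( )
        KRONA_KTUPDATETAXONOMY.out.db.view() // taxonomy/taxonomy.tab
    }
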
modules/krona/ktupdatetaxonomy/meta.yml (new file, 31 lines)

name: krona_ktupdatetaxonomy
description: KronaTools Update Taxonomy downloads a taxonomy database
keywords:
  - database
  - taxonomy
  - krona
  - visualisation
tools:
  - krona:
      description: Krona Tools is a set of scripts to create Krona charts from several bioinformatics tools as well as from text and XML files.
      homepage: https://github.com/marbl/Krona/wiki/KronaTools
      documentation: https://github.com/marbl/Krona/wiki/Installing
      tool_dev_url:
      doi: https://doi.org/10.1186/1471-2105-12-385
      licence:

input:
  - none: There is no input. This module downloads a pre-built taxonomy database for use with Krona Tools.

output:
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"
  - db:
      type: file
      description: A TAB separated file that contains a taxonomy database.
      pattern: "*.{tab}"

authors:
  - "@mjakobs"

modules/md5sum/main.nf (new file, 35 lines)

process MD5SUM {
    tag "$meta.id"
    label 'process_low'

    conda (params.enable_conda ? "conda-forge::coreutils=9.1" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/ubuntu:20.04' :
        'ubuntu:20.04' }"

    input:
    tuple val(meta), path(file)

    output:
    tuple val(meta), path("*.md5"), emit: checksum
    path "versions.yml"           , emit: versions

    when:
    task.ext.when == null || task.ext.when

    script:
    def args = task.ext.args ?: ''
    def prefix = task.ext.prefix ?: "${meta.id}"

    """
    md5sum \\
        $args \\
        ${file} \\
        > ${file}.md5

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        md5sum: \$(echo \$(md5sum --version 2>&1 | head -n 1| sed 's/^.*) //;' ))
    END_VERSIONS
    """
}

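A usage sketch for the new checksum module, assuming a hypothetical input file; each [ meta, file ] pair yields a <file>.md5 alongside versions.yml:

    include { MD5SUM } from './modules/md5sum/main'

    workflow {
        ch_files = Channel.of( [ [ id:'test' ], file('data.bam') ] )
        MD5SUM ( ch_files ) // emits [ meta, data.bam.md5 ] on the checksum channel
    }
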
modules/md5sum/meta.yml (new file, 39 lines)

name: "md5sum"
description: Create an MD5 (128-bit) checksum
keywords:
  - checksum
tools:
  - "md5sum":
      description: Create an MD5 (128-bit) checksum
      homepage: "https://www.gnu.org"
      documentation: "https://man7.org/linux/man-pages/man1/md5sum.1.html"
      licence: GPLv3+

input:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - file:
      type: file
      description: Any file
      pattern: "*.*"

output:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"
  - checksum:
      type: file
      description: File containing checksum
      pattern: "*.md5"

authors:
  - "@matthdsm"

modules/motus/downloaddb/main.nf (new file, 39 lines)

process MOTUS_DOWNLOADDB {
    label 'process_low'

    conda (params.enable_conda ? "bioconda::motus=3.0.1" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/motus:3.0.1--pyhdfd78af_0':
        'quay.io/biocontainers/motus:3.0.1--pyhdfd78af_0' }"

    input:
    path motus_downloaddb_script

    output:
    path "db_mOTU/"     , emit: db
    path "versions.yml" , emit: versions

    when:
    task.ext.when == null || task.ext.when

    script:
    def args = task.ext.args ?: ''
    def software = "${motus_downloaddb_script.simpleName}_copy.py"
    """
    ## The script file must be copied to the working directory,
    ## otherwise the reference_db will be downloaded to the bin folder
    ## rather than the current directory
    cp $motus_downloaddb_script ${software}
    python ${software} \\
        $args \\
        -t $task.cpus

    ## The mOTUs version number is not available from the command line.
    ## mOTUs saves the version number in the index database folder and
    ## checks that the database version matches the executable version.
    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        mOTUs: \$(grep motus db_mOTU/db_mOTU_versions | sed 's/motus\\t//g')
    END_VERSIONS
    """
}

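The module expects the downloadDB.py script itself as input and copies it into the task work directory before running it, so the database lands in the current directory. A sketch, using the remote source named in the meta.yml below (include path hypothetical):

    include { MOTUS_DOWNLOADDB } from './modules/motus/downloaddb/main'

    workflow {
        downloaddb = file('https://raw.githubusercontent.com/motu-tool/mOTUs/master/motus/downloadDB.py')
        MOTUS_DOWNLOADDB ( downloaddb ) // emits the db_mOTU/ directory
    }
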
modules/motus/downloaddb/meta.yml (new file, 39 lines)

name: "motus_downloaddb"
description: Download the mOTUs database
keywords:
  - classify
  - metagenomics
  - fastq
  - taxonomic profiling
  - database
  - download
tools:
  - "motus":
      description: "The mOTU profiler is a computational tool that estimates relative taxonomic abundance of known and currently unknown microbial community members using metagenomic shotgun sequencing data."
      homepage: "None"
      documentation: "https://github.com/motu-tool/mOTUs/wiki"
      tool_dev_url: "https://github.com/motu-tool/mOTUs"
      doi: "10.1038/s41467-019-08844-4"
      licence: "['GPL v3']"

input:
  - motus_downloaddb:
      type: directory
      description: |
        The mOTUs downloadDB script source file,
        either from the local installation or from a remote source
        such as https://raw.githubusercontent.com/motu-tool/mOTUs/master/motus/downloadDB.py
      pattern: "downloadDB.py"

output:
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"
  - db:
      type: directory
      description: The mOTUs database directory
      pattern: "db_mOTU"

authors:
  - "@jianhong"

modules/samtools/bam2fq/main.nf

@@ -45,7 +45,7 @@ process SAMTOOLS_BAM2FQ {
     bam2fq \\
         $args \\
         -@ $task.cpus \\
-        $inputbam >${prefix}_interleaved.fq.gz
+        $inputbam | gzip --no-name > ${prefix}_interleaved.fq.gz

     cat <<-END_VERSIONS > versions.yml
     "${task.process}":

modules/shasum/main.nf (new file, 35 lines)

process SHASUM {
    tag "$meta.id"
    label 'process_low'

    conda (params.enable_conda ? "conda-forge::coreutils=9.1" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/ubuntu:20.04' :
        'ubuntu:20.04' }"

    input:
    tuple val(meta), path(file)

    output:
    tuple val(meta), path("*.sha256"), emit: checksum
    path "versions.yml"              , emit: versions

    when:
    task.ext.when == null || task.ext.when

    script:
    def args = task.ext.args ?: ''
    def prefix = task.ext.prefix ?: "${meta.id}"

    """
    sha256sum \\
        $args \\
        ${file} \\
        > ${file}.sha256

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        sha256sum: \$(echo \$(sha256sum --version 2>&1 | head -n 1| sed 's/^.*) //;' ))
    END_VERSIONS
    """
}

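SHASUM mirrors the md5sum module with sha256sum; a one-line sketch with a hypothetical input:

    include { SHASUM } from './modules/shasum/main'

    workflow {
        SHASUM ( Channel.of( [ [ id:'test' ], file('data.bam') ] ) ) // emits data.bam.sha256
    }
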
modules/shasum/meta.yml (new file, 40 lines)

name: "shasum"
description: Print SHA256 (256-bit) checksums.
keywords:
  - checksum
  - sha256
tools:
  - "md5sum":
      description: Create an SHA256 (256-bit) checksum.
      homepage: "https://www.gnu.org"
      documentation: "https://linux.die.net/man/1/shasum"
      licence: GPLv3+

input:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - file:
      type: file
      description: Any file
      pattern: "*.*"

output:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"
  - checksum:
      type: file
      description: File containing checksum
      pattern: "*.sha256"

authors:
  - "@matthdsm"

modules/snpeff/Dockerfile

@@ -8,15 +8,16 @@ LABEL \
 COPY environment.yml /
 RUN conda env create -f /environment.yml && conda clean -a

-# Add conda installation dir to PATH (instead of doing 'conda activate')
-ENV PATH /opt/conda/envs/nf-core-snpeff-5.0/bin:$PATH
-
 # Setup default ARG variables
 ARG GENOME=GRCh38
 ARG SNPEFF_CACHE_VERSION=99
+ARG SNPEFF_TAG=99
+
+# Add conda installation dir to PATH (instead of doing 'conda activate')
+ENV PATH /opt/conda/envs/nf-core-snpeff-${SNPEFF_TAG}/bin:$PATH

 # Download Genome
 RUN snpEff download -v ${GENOME}.${SNPEFF_CACHE_VERSION}

 # Dump the details of the installed packages to a file for posterity
-RUN conda env export --name nf-core-snpeff-5.0 > nf-core-snpeff-5.0.yml
+RUN conda env export --name nf-core-snpeff-${SNPEFF_TAG} > nf-core-snpeff-${SNPEFF_TAG}.yml

modules/snpeff/build.sh (Executable file → Normal file)

@@ -9,10 +9,11 @@ build_push() {
     SNPEFF_TAG=$3

     docker build \
-        . \
         -t nfcore/snpeff:${SNPEFF_TAG}.${GENOME} \
+        software/snpeff/. \
        --build-arg GENOME=${GENOME} \
-        --build-arg SNPEFF_CACHE_VERSION=${SNPEFF_CACHE_VERSION}
+        --build-arg SNPEFF_CACHE_VERSION=${SNPEFF_CACHE_VERSION} \
+        --build-arg SNPEFF_TAG=${SNPEFF_TAG}

     docker push nfcore/snpeff:${SNPEFF_TAG}.${GENOME}
 }

|
@ -10,18 +10,6 @@ tools:
|
|||
homepage: https://pcingola.github.io/SnpEff/
|
||||
documentation: https://pcingola.github.io/SnpEff/se_introduction/
|
||||
licence: ["MIT"]
|
||||
params:
|
||||
- use_cache:
|
||||
type: boolean
|
||||
description: |
|
||||
boolean to enable the usage of containers with cache
|
||||
Enable the usage of containers with cache
|
||||
Does not work with conda
|
||||
- snpeff_tag:
|
||||
type: value
|
||||
description: |
|
||||
Specify the tag for the container
|
||||
https://hub.docker.com/r/nfcore/snpeff/tags
|
||||
input:
|
||||
- meta:
|
||||
type: map
|
||||
|
|
|
modules/svdb/merge/main.nf

@@ -2,10 +2,10 @@ process SVDB_MERGE {
     tag "$meta.id"
     label 'process_medium'

-    conda (params.enable_conda ? "bioconda::svdb=2.6.0" : null)
+    conda (params.enable_conda ? "bioconda::svdb=2.6.1" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/svdb:2.6.0--py39h5371cbf_0':
-        'quay.io/biocontainers/svdb:2.6.0--py39h5371cbf_0' }"
+        'https://depot.galaxyproject.org/singularity/svdb:2.6.1--py39h5371cbf_0':
+        'quay.io/biocontainers/svdb:2.6.1--py39h5371cbf_0' }"

     input:
     tuple val(meta), path(vcfs)

modules/svdb/query/main.nf

@@ -2,10 +2,10 @@ process SVDB_QUERY {
     tag "$meta.id"
     label 'process_medium'

-    conda (params.enable_conda ? "bioconda::svdb=2.6.0" : null)
+    conda (params.enable_conda ? "bioconda::svdb=2.6.1" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/svdb:2.6.0--py39h5371cbf_0':
-        'quay.io/biocontainers/svdb:2.6.0--py39h5371cbf_0' }"
+        'https://depot.galaxyproject.org/singularity/svdb:2.6.1--py39h5371cbf_0':
+        'quay.io/biocontainers/svdb:2.6.1--py39h5371cbf_0' }"

     input:
     tuple val(meta), path(vcf)

subworkflows/nf-core/annotation/ensemblvep/main.nf (new file, 31 lines)

//
// Run VEP to annotate VCF files
//

include { ENSEMBLVEP       } from '../../../../modules/ensemblvep/main'
include { TABIX_BGZIPTABIX } from '../../../../modules/tabix/bgziptabix/main'

workflow ANNOTATION_ENSEMBLVEP {
    take:
    vcf               // channel: [ val(meta), vcf ]
    vep_genome        //   value: genome to use
    vep_species       //   value: species to use
    vep_cache_version //   value: cache version to use
    vep_cache         //    path: /path/to/vep/cache (optional)
    vep_extra_files   // channel: [ file1, file2... ] (optional)

    main:
    ch_versions = Channel.empty()

    ENSEMBLVEP(vcf, vep_genome, vep_species, vep_cache_version, vep_cache, vep_extra_files)
    TABIX_BGZIPTABIX(ENSEMBLVEP.out.vcf)

    // Gather versions of all tools used
    ch_versions = ch_versions.mix(ENSEMBLVEP.out.versions.first())
    ch_versions = ch_versions.mix(TABIX_BGZIPTABIX.out.versions.first())

    emit:
    vcf_tbi  = TABIX_BGZIPTABIX.out.gz_tbi // channel: [ val(meta), vcf.gz, vcf.gz.tbi ]
    reports  = ENSEMBLVEP.out.report       //    path: *.html
    versions = ch_versions                 //    path: versions.yml
}

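Compared with the deleted annotation_ensemblvep subworkflow further down, this version adds the vep_extra_files input and accumulates tool versions in a ch_versions channel. A call sketch with hypothetical genome/species values and include path:

    include { ANNOTATION_ENSEMBLVEP } from './subworkflows/nf-core/annotation/ensemblvep/main'

    workflow {
        ch_vcf = Channel.of( [ [ id:'test' ], file('test.vcf.gz') ] )

        ANNOTATION_ENSEMBLVEP ( ch_vcf, 'GRCh38', 'homo_sapiens', '104', [], [] )
        ANNOTATION_ENSEMBLVEP.out.vcf_tbi.view() // [ meta, vcf.gz, vcf.gz.tbi ]
    }
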
subworkflows/nf-core/annotation/ensemblvep/meta.yml (new file, 49 lines)

name: annotation_ensemblvep
description: |
  Perform annotation with ensemblvep and bgzip + tabix index the resulting VCF file
keywords:
  - ensemblvep
modules:
  - ensemblvep
  - tabix/bgziptabix
input:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - vcf:
      type: file
      description: |
        vcf to annotate
  - genome:
      type: value
      description: |
        which genome to annotate with
  - species:
      type: value
      description: |
        which species to annotate with
  - cache_version:
      type: value
      description: |
        which version of the cache to annotate with
  - cache:
      type: file
      description: |
        path to VEP cache (optional)
  - extra_files:
      type: tuple
      description: |
        path to file(s) needed for plugins (optional)
output:
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"
  - vcf_tbi:
      type: file
      description: Compressed vcf file + tabix index
      pattern: "[ *{.vcf.gz,vcf.gz.tbi} ]"
authors:
  - "@maxulysse"

subworkflows/nf-core/annotation/snpeff/main.nf (new file, 28 lines)

//
// Run SNPEFF to annotate VCF files
//

include { SNPEFF           } from '../../../../modules/snpeff/main'
include { TABIX_BGZIPTABIX } from '../../../../modules/tabix/bgziptabix/main'

workflow ANNOTATION_SNPEFF {
    take:
    vcf          // channel: [ val(meta), vcf ]
    snpeff_db    //   value: db version to use
    snpeff_cache //    path: /path/to/snpeff/cache (optional)

    main:
    ch_versions = Channel.empty()

    SNPEFF(vcf, snpeff_db, snpeff_cache)
    TABIX_BGZIPTABIX(SNPEFF.out.vcf)

    // Gather versions of all tools used
    ch_versions = ch_versions.mix(SNPEFF.out.versions.first())
    ch_versions = ch_versions.mix(TABIX_BGZIPTABIX.out.versions.first())

    emit:
    vcf_tbi  = TABIX_BGZIPTABIX.out.gz_tbi // channel: [ val(meta), vcf.gz, vcf.gz.tbi ]
    reports  = SNPEFF.out.report           //    path: *.html
    versions = ch_versions                 //    path: versions.yml
}

subworkflows/nf-core/annotation/snpeff/meta.yml

@@ -11,11 +11,19 @@ input:
       type: map
       description: |
         Groovy Map containing sample information
-        e.g. [ id:'test' ]
-  - input:
-      type: vcf
-      description: list containing one vcf file
-      pattern: "[ *.{vcf,vcf.gz} ]"
+        e.g. [ id:'test', single_end:false ]
+  - vcf:
+      type: file
+      description: |
+        vcf to annotate
+  - db:
+      type: value
+      description: |
+        which db to annotate with
+  - cache:
+      type: file
+      description: |
+        path to snpEff cache (optional)
 output:
   - versions:
       type: file

subworkflows/nf-core/annotation_ensemblvep/main.nf (deleted, 26 lines)

//
// Run VEP to annotate VCF files
//

include { ENSEMBLVEP } from '../../../modules/ensemblvep/main'
include { TABIX_BGZIPTABIX as ANNOTATION_BGZIPTABIX } from '../../../modules/tabix/bgziptabix/main'

workflow ANNOTATION_ENSEMBLVEP {
    take:
    vcf               // channel: [ val(meta), vcf ]
    vep_genome        //   value: which genome
    vep_species       //   value: which species
    vep_cache_version //   value: which cache version
    vep_cache         //    path: path_to_vep_cache (optional)

    main:
    ENSEMBLVEP(vcf, vep_genome, vep_species, vep_cache_version, vep_cache)
    ANNOTATION_BGZIPTABIX(ENSEMBLVEP.out.vcf)

    ch_versions = ENSEMBLVEP.out.versions.first().mix(ANNOTATION_BGZIPTABIX.out.versions.first())

    emit:
    vcf_tbi  = ANNOTATION_BGZIPTABIX.out.gz_tbi // channel: [ val(meta), vcf.gz, vcf.gz.tbi ]
    reports  = ENSEMBLVEP.out.report            //    path: *.html
    versions = ch_versions                      //    path: versions.yml
}

subworkflows/nf-core/annotation_ensemblvep/meta.yml (deleted, 29 lines)

name: annotation_ensemblvep
description: |
  Perform annotation with ensemblvep and bgzip + tabix index the resulting VCF file
keywords:
  - ensemblvep
modules:
  - ensemblvep
  - tabix/bgziptabix
input:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test' ]
  - input:
      type: vcf
      description: list containing one vcf file
      pattern: "[ *.{vcf,vcf.gz} ]"
output:
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"
  - vcf_tbi:
      type: file
      description: Compressed vcf file + tabix index
      pattern: "[ *{.vcf.gz,vcf.gz.tbi} ]"
authors:
  - "@maxulysse"

subworkflows/nf-core/annotation_snpeff/main.nf (deleted, 23 lines)

//
// Run SNPEFF to annotate VCF files
//

include { SNPEFF } from '../../../modules/snpeff/main'
include { TABIX_BGZIPTABIX as ANNOTATION_BGZIPTABIX } from '../../../modules/tabix/bgziptabix/main'

workflow ANNOTATION_SNPEFF {
    take:
    vcf          // channel: [ val(meta), vcf ]
    snpeff_db    //   value: version of db to use
    snpeff_cache //    path: path_to_snpeff_cache (optional)

    main:
    SNPEFF(vcf, snpeff_db, snpeff_cache)
    ANNOTATION_BGZIPTABIX(SNPEFF.out.vcf)
    ch_versions = SNPEFF.out.versions.first().mix(ANNOTATION_BGZIPTABIX.out.versions.first())

    emit:
    vcf_tbi  = ANNOTATION_BGZIPTABIX.out.gz_tbi // channel: [ val(meta), vcf.gz, vcf.gz.tbi ]
    reports  = SNPEFF.out.report                //    path: *.html
    versions = ch_versions                      //    path: versions.yml
}

tests/config/pytest_modules.yml

@@ -891,6 +891,14 @@ hamronization/summarize:
   - modules/hamronization/summarize/**
   - tests/modules/hamronization/summarize/**

+happy/happy:
+  - modules/happy/happy/**
+  - tests/modules/happy/happy/**
+
+happy/prepy:
+  - modules/happy/prepy/**
+  - tests/modules/happy/prepy/**
+
 hicap:
   - modules/hicap/**
   - tests/modules/hicap/**

@@ -1058,6 +1066,10 @@ krona/ktimporttext:
   - modules/krona/ktimporttext/**
   - tests/modules/krona/ktimporttext/**

+krona/ktupdatetaxonomy:
+  - modules/krona/ktupdatetaxonomy/**
+  - tests/modules/krona/ktupdatetaxonomy/**
+
 last/dotplot:
   - modules/last/dotplot/**
   - tests/modules/last/dotplot/**

@@ -1178,6 +1190,10 @@ maxbin2:
   - modules/maxbin2/**
   - tests/modules/maxbin2/**

+md5sum:
+  - modules/md5sum/**
+  - tests/modules/md5sum/**
+
 medaka:
   - modules/medaka/**
   - tests/modules/medaka/**

@@ -1242,6 +1258,10 @@ mosdepth:
   - modules/mosdepth/**
   - tests/modules/mosdepth/**

+motus/downloaddb:
+  - modules/motus/downloaddb/**
+  - tests/modules/motus/downloaddb/**
+
 msisensor/msi:
   - modules/msisensor/msi/**
   - tests/modules/msisensor/msi/**

@@ -1719,6 +1739,10 @@ seqwish/induce:
   - modules/seqwish/induce/**
   - tests/modules/seqwish/induce/**

+shasum:
+  - modules/shasum/**
+  - tests/modules/shasum/**
+
 shigatyper:
   - modules/shigatyper/**
   - tests/modules/shigatyper/**

tests/config/test_data.config

@@ -111,7 +111,9 @@ params {
         test_sequencing_summary = "${test_data_dir}/genomics/sarscov2/nanopore/sequencing_summary/test.sequencing_summary.txt"
     }
     'metagenome' {
         classified_reads_assignment = "${test_data_dir}/genomics/sarscov2/metagenome/test_1.kraken2.reads.txt"
+        kraken_report               = "${test_data_dir}/genomics/sarscov2/metagenome/test_1.kraken2.report.txt"
+        krona_taxonomy              = "${test_data_dir}/genomics/sarscov2/metagenome/krona_taxonomy.tab"
     }
 }
 'homo_sapiens' {

tests/modules/bowtie2/align/main.nf

@@ -32,4 +32,4 @@ workflow test_bowtie2_align_paired_end {

     BOWTIE2_BUILD ( fasta )
     BOWTIE2_ALIGN ( input, BOWTIE2_BUILD.out.index, save_unaligned )
-}
+}

tests/modules/bowtie2/align/nextflow.config

@@ -1,5 +1,16 @@
+params {
+    force_large_index = false
+}
+
 process {

     publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }

 }
+
+if (params.force_large_index) {
+    process {
+        withName: BOWTIE2_BUILD {
+            ext.args = '--large-index'
+        }
+    }
+}

tests/modules/bowtie2/align/test.yml

@@ -39,3 +39,45 @@
       md5sum: 52be6950579598a990570fbcf5372184
     - path: ./output/bowtie2/bowtie2/genome.rev.2.bt2
       md5sum: e3b4ef343dea4dd571642010a7d09597
+
+- name: bowtie2 align single-end large-index
+  command: nextflow run ./tests/modules/bowtie2/align -entry test_bowtie2_align_single_end -c ./tests/config/nextflow.config -c ./tests/modules/bowtie2/align/nextflow.config --force_large_index
+  tags:
+    - bowtie2
+    - bowtie2/align
+  files:
+    - path: ./output/bowtie2/test.bam
+    - path: ./output/bowtie2/test.bowtie2.log
+    - path: ./output/bowtie2/bowtie2/genome.3.bt2l
+      md5sum: 8952b3e0b1ce9a7a5916f2e147180853
+    - path: ./output/bowtie2/bowtie2/genome.2.bt2l
+      md5sum: 22c284084784a0720989595e0c9461fd
+    - path: ./output/bowtie2/bowtie2/genome.1.bt2l
+      md5sum: 07d811cd4e350d56267183d2ac7023a5
+    - path: ./output/bowtie2/bowtie2/genome.4.bt2l
+      md5sum: c25be5f8b0378abf7a58c8a880b87626
+    - path: ./output/bowtie2/bowtie2/genome.rev.1.bt2l
+      md5sum: fda48e35925fb24d1c0785f021981e25
+    - path: ./output/bowtie2/bowtie2/genome.rev.2.bt2l
+      md5sum: 802c26d32b970e1b105032b7ce7348b4
+
+- name: bowtie2 align paired-end large-index
+  command: nextflow run ./tests/modules/bowtie2/align -entry test_bowtie2_align_paired_end -c ./tests/config/nextflow.config -c ./tests/modules/bowtie2/align/nextflow.config --force_large_index
+  tags:
+    - bowtie2
+    - bowtie2/align
+  files:
+    - path: ./output/bowtie2/test.bam
+    - path: ./output/bowtie2/test.bowtie2.log
+    - path: ./output/bowtie2/bowtie2/genome.3.bt2l
+      md5sum: 8952b3e0b1ce9a7a5916f2e147180853
+    - path: ./output/bowtie2/bowtie2/genome.2.bt2l
+      md5sum: 22c284084784a0720989595e0c9461fd
+    - path: ./output/bowtie2/bowtie2/genome.1.bt2l
+      md5sum: 07d811cd4e350d56267183d2ac7023a5
+    - path: ./output/bowtie2/bowtie2/genome.4.bt2l
+      md5sum: c25be5f8b0378abf7a58c8a880b87626
+    - path: ./output/bowtie2/bowtie2/genome.rev.1.bt2l
+      md5sum: fda48e35925fb24d1c0785f021981e25
+    - path: ./output/bowtie2/bowtie2/genome.rev.2.bt2l
+      md5sum: 802c26d32b970e1b105032b7ce7348b4

tests/modules/cnvpytor/callcnvs/test.yml

@@ -4,10 +4,8 @@
   - cnvpytor
   - cnvpytor/callcnvs
   files:
-    - path: output/cnvpytor/test.tsv
-      md5sum: d41d8cd98f00b204e9800998ecf8427e
+    - path: output/cnvpytor/test.pytor
     - path: output/cnvpytor/versions.yml
-      md5sum: 0bea08a253fcb2ff0ff79b99df77b9fa

 - name: cnvpytor callcnvs test_cnvpytor_callcnvs stub
   command: nextflow run tests/modules/cnvpytor/callcnvs -entry test_cnvpytor_callcnvs -c tests/config/nextflow.config -stub-run

@@ -15,6 +13,5 @@
   - cnvpytor
   - cnvpytor/callcnvs
   files:
-    - path: output/cnvpytor/test.tsv
+    - path: output/cnvpytor/test.pytor
     - path: output/cnvpytor/versions.yml
-      md5sum: 0bea08a253fcb2ff0ff79b99df77b9fa

tests/modules/cnvpytor/histogram/test.yml

@@ -5,9 +5,7 @@
   - cnvpytor/histogram
   files:
     - path: output/cnvpytor/test.pytor
-      md5sum: aa03a8fa15b39f77816705a48e10312a
     - path: output/cnvpytor/versions.yml
-      md5sum: 0f4d75c4f3a3eb26c22616d12b0b78b2

 - name: cnvpytor histogram test_cnvpytor_histogram stub
   command: nextflow run tests/modules/cnvpytor/histogram -entry test_cnvpytor_histogram -c tests/config/nextflow.config -stub-run

@@ -17,4 +15,3 @@
   files:
     - path: output/cnvpytor/test.pytor
     - path: output/cnvpytor/versions.yml
-      md5sum: 0f4d75c4f3a3eb26c22616d12b0b78b2

tests/modules/cnvpytor/importreaddepth/test.yml

@@ -6,7 +6,6 @@
   files:
     - path: output/cnvpytor/test.pytor
     - path: output/cnvpytor/versions.yml
-      md5sum: 5834495324c08a37f3fd73ccdd881dc8

 - name: cnvpytor importreaddepth test_cnvpytor_importreaddepth stub
   command: nextflow run tests/modules/cnvpytor/importreaddepth -entry test_cnvpytor_importreaddepth -c tests/config/nextflow.config -stub-run

@@ -16,7 +15,6 @@
   files:
     - path: output/cnvpytor/test.pytor
     - path: output/cnvpytor/versions.yml
-      md5sum: 5834495324c08a37f3fd73ccdd881dc8

 - name: cnvpytor importreaddepth test_cnvpytor_importreaddepth_cram
   command: nextflow run tests/modules/cnvpytor/importreaddepth -entry test_cnvpytor_importreaddepth_cram -c tests/config/nextflow.config

@@ -26,7 +24,6 @@
   files:
     - path: output/cnvpytor/test.pytor
     - path: output/cnvpytor/versions.yml
-      md5sum: dfa0afb0982d985b96d1633f71ebb82a

 - name: cnvpytor importreaddepth test_cnvpytor_importreaddepth_cram stub
   command: nextflow run tests/modules/cnvpytor/importreaddepth -entry test_cnvpytor_importreaddepth_cram -c tests/config/nextflow.config -stub-run

@@ -36,4 +33,3 @@
   files:
     - path: output/cnvpytor/test.pytor
     - path: output/cnvpytor/versions.yml
-      md5sum: dfa0afb0982d985b96d1633f71ebb82a

tests/modules/cnvpytor/partition/test.yml

@@ -5,9 +5,7 @@
   - cnvpytor/partition
   files:
     - path: output/cnvpytor/test.pytor
-      md5sum: aa03a8fa15b39f77816705a48e10312a
     - path: output/cnvpytor/versions.yml
-      md5sum: 7fd6ec952a316463bcd324f176b46b64

 - name: cnvpytor partition test_cnvpytor_partition stub
   command: nextflow run tests/modules/cnvpytor/partition -entry test_cnvpytor_partition -c tests/config/nextflow.config -stub-run

@@ -17,4 +15,3 @@
   files:
     - path: output/cnvpytor/test.pytor
     - path: output/cnvpytor/versions.yml
-      md5sum: 7fd6ec952a316463bcd324f176b46b64

tests/modules/cnvpytor/view/main.nf (new file, 42 lines)

#!/usr/bin/env nextflow

nextflow.enable.dsl = 2

include { CNVPYTOR_VIEW } from '../../../../modules/cnvpytor/view/main.nf'

workflow test_cnvpytor_view {

    input = [
        [ id:'test'], // meta map
        [file(params.test_data['homo_sapiens']['illumina']['test_pytor'], checkIfExists: true)]
    ]

    bin_sizes = "10000 100000"

    CNVPYTOR_VIEW ( input, bin_sizes, [] )
}

workflow test_cnvpytor_view_tsvout {

    input = [
        [ id:'test'], // meta map
        [file(params.test_data['homo_sapiens']['illumina']['test_pytor'], checkIfExists: true)]
    ]

    output_suffix = "tsv"

    CNVPYTOR_VIEW ( input, [], output_suffix )
}

workflow test_cnvpytor_view_stub {

    input = [
        [ id:'test'], // meta map
        [file(params.test_data['homo_sapiens']['illumina']['test_pytor'], checkIfExists: true)]
    ]

    bin_sizes = []
    output_suffix = []

    CNVPYTOR_VIEW ( input, bin_sizes, output_suffix )
}

tests/modules/cnvpytor/view/nextflow.config (new file, 7 lines)

process {

    publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
    withName: CNVPYTOR_VIEW {
        ext.args = '10000 100000'
    }
}

tests/modules/cnvpytor/view/test.yml (new file, 27 lines)

- name: cnvpytor view test_cnvpytor_view
  command: nextflow run tests/modules/cnvpytor/view -entry test_cnvpytor_view -c tests/config/nextflow.config
  tags:
    - cnvpytor
    - cnvpytor/view
  files:
    - path: output/cnvpytor/test_10000.vcf
    - path: output/cnvpytor/test_100000.vcf
    - path: output/cnvpytor/versions.yml

- name: cnvpytor view test_cnvpytor_view tsv
  command: nextflow run tests/modules/cnvpytor/view -entry test_cnvpytor_view_tsvout -c tests/config/nextflow.config
  tags:
    - cnvpytor
    - cnvpytor/view
  files:
    - path: output/cnvpytor/test_1000.tsv
    - path: output/cnvpytor/versions.yml

- name: cnvpytor view test_cnvpytor_view stub
  command: nextflow run tests/modules/cnvpytor/view -entry test_cnvpytor_view_stub -c tests/config/nextflow.config -stub-run
  tags:
    - cnvpytor
    - cnvpytor/view
  files:
    - path: output/cnvpytor/test.vcf
    - path: output/cnvpytor/versions.yml

tests/modules/ensemblvep/main.nf

@@ -10,5 +10,5 @@ workflow test_ensemblvep {
         file(params.test_data['sarscov2']['illumina']['test_vcf'], checkIfExists: true)
     ]

-    ENSEMBLVEP ( input, "WBcel235", "caenorhabditis_elegans", "104", [] )
+    ENSEMBLVEP ( input, "WBcel235", "caenorhabditis_elegans", "104", [], [] )
 }

39  tests/modules/happy/happy/main.nf  Normal file
@ -0,0 +1,39 @@
#!/usr/bin/env nextflow

nextflow.enable.dsl = 2

include { HAPPY_HAPPY } from '../../../../modules/happy/happy/main.nf'

workflow test_happy_vcf {

    input = [
        [ id:'test' ], // meta map
        file(params.test_data['homo_sapiens']['illumina']['test_rnaseq_vcf'], checkIfExists: true),
        file(params.test_data['homo_sapiens']['illumina']['test_genome21_indels_vcf_gz'], checkIfExists: true),
        file(params.test_data['homo_sapiens']['genome']['genome_bed'], checkIfExists: true)
    ]

    fasta = Channel.value([
        file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true),
        file(params.test_data['homo_sapiens']['genome']['genome_fasta_fai'], checkIfExists: true)
    ])

    HAPPY_HAPPY ( input, fasta )
}

workflow test_happy_gvcf {

    input = [
        [ id:'test' ], // meta map
        file(params.test_data['homo_sapiens']['illumina']['test_rnaseq_vcf'], checkIfExists: true),
        file(params.test_data['homo_sapiens']['illumina']['test_genome_vcf'], checkIfExists: true),
        file(params.test_data['homo_sapiens']['genome']['genome_bed'], checkIfExists: true)
    ]

    fasta = Channel.value([
        file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true),
        file(params.test_data['homo_sapiens']['genome']['genome_fasta_fai'], checkIfExists: true)
    ])

    HAPPY_HAPPY ( input, fasta )
}
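For context, hap.py benchmarks a query VCF against a truth set restricted to a confident-region BED, relative to the reference FASTA, and writes <prefix>.summary.csv, <prefix>.extended.csv and <prefix>.runinfo.json — matching the expected outputs in the test.yml below. A minimal sketch of such a process (truth/query ordering and flags follow hap.py's documented CLI, not this diff; the module's actual script may differ):

    process HAPPY_SKETCH {
        input:
        tuple val(meta), path(query_vcf), path(truth_vcf), path(bed)
        tuple path(fasta), path(fai)

        output:
        tuple val(meta), path("${meta.id}.summary.csv")

        script:
        """
        hap.py \\
            $truth_vcf \\
            $query_vcf \\
            -f $bed \\
            -r $fasta \\
            -o ${meta.id}
        """
    }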
5  tests/modules/happy/happy/nextflow.config  Normal file
@ -0,0 +1,5 @@
process {

    publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }

}
27  tests/modules/happy/happy/test.yml  Normal file
@ -0,0 +1,27 @@
- name: happy happy test_happy_vcf
  command: nextflow run tests/modules/happy/happy -entry test_happy_vcf -c tests/config/nextflow.config
  tags:
    - happy
    - happy/happy
  files:
    - path: output/happy/test.extended.csv
      md5sum: ef79c7c789ef4f146ca2e50dafaf22b3
    - path: output/happy/test.runinfo.json
    - path: output/happy/test.summary.csv
      md5sum: f8aa5d36d3c48dede2f607fd565894ad
    - path: output/happy/versions.yml
      md5sum: 82243bf6dbdc71aa63211ee2a89f47f2

- name: happy happy test_happy_gvcf
  command: nextflow run tests/modules/happy/happy -entry test_happy_gvcf -c tests/config/nextflow.config
  tags:
    - happy
    - happy/happy
  files:
    - path: output/happy/test.extended.csv
      md5sum: 3d5c21b67a259a3f6dcb088d55b86cd3
    - path: output/happy/test.runinfo.json
    - path: output/happy/test.summary.csv
      md5sum: 03044e9bb5a0c6f0947b7e910fc8a558
    - path: output/happy/versions.yml
      md5sum: 551fa216952d6f5de78e6e453b92aaab
37  tests/modules/happy/prepy/main.nf  Normal file
@ -0,0 +1,37 @@
#!/usr/bin/env nextflow

nextflow.enable.dsl = 2

include { HAPPY_PREPY } from '../../../../modules/happy/prepy/main.nf'

workflow test_happy_prepy_vcf {

    input = [
        [ id:'test' ], // meta map
        file(params.test_data['homo_sapiens']['illumina']['test_genome21_indels_vcf_gz'], checkIfExists: true),
        file(params.test_data['homo_sapiens']['genome']['genome_bed'], checkIfExists: true)
    ]

    fasta = Channel.value([
        file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true),
        file(params.test_data['homo_sapiens']['genome']['genome_fasta_fai'], checkIfExists: true)
    ])

    HAPPY_PREPY ( input, fasta )
}

workflow test_happy_prepy_gvcf {

    input = [
        [ id:'test' ], // meta map
        file(params.test_data['homo_sapiens']['illumina']['test_genome_vcf'], checkIfExists: true),
        file(params.test_data['homo_sapiens']['genome']['genome_bed'], checkIfExists: true)
    ]

    fasta = Channel.value([
        file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true),
        file(params.test_data['homo_sapiens']['genome']['genome_fasta_fai'], checkIfExists: true)
    ])

    HAPPY_PREPY ( input, fasta )
}
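pre.py is hap.py's preprocessing companion: it normalizes a VCF against the reference before comparison, which fits the single test.vcf.gz each prepy test expects. A rough sketch (the positional input/output arguments and -r follow pre.py's documented usage; the BED handling is omitted here, so treat this as an assumption rather than the module's script):

    process PREPY_SKETCH {
        input:
        tuple val(meta), path(vcf), path(bed)
        tuple path(fasta), path(fai)

        output:
        tuple val(meta), path("${meta.id}.vcf.gz")

        script:
        """
        pre.py \\
            $vcf \\
            ${meta.id}.vcf.gz \\
            -r $fasta
        """
    }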
5  tests/modules/happy/prepy/nextflow.config  Normal file
@ -0,0 +1,5 @@
process {

    publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }

}
19  tests/modules/happy/prepy/test.yml  Normal file
@ -0,0 +1,19 @@
- name: happy prepy test_happy_prepy_vcf
  command: nextflow run tests/modules/happy/prepy -entry test_happy_prepy_vcf -c tests/config/nextflow.config
  tags:
    - happy/prepy
    - happy
  files:
    - path: output/happy/test.vcf.gz
    - path: output/happy/versions.yml
      md5sum: 814d20f1f29f23a3d21012748a5d6393

- name: happy prepy test_happy_prepy_gvcf
  command: nextflow run tests/modules/happy/prepy -entry test_happy_prepy_gvcf -c tests/config/nextflow.config
  tags:
    - happy/prepy
    - happy
  files:
    - path: output/happy/test.vcf.gz
    - path: output/happy/versions.yml
      md5sum: 970a54de46e68ef6d5228a26eaa4c8e7
@ -2,15 +2,27 @@

nextflow.enable.dsl = 2

include { KRONA_KTIMPORTTAXONOMY } from '../../../../modules/krona/ktimporttaxonomy/main.nf'
include { KRONA_KTIMPORTTAXONOMY as KRONA_KTIMPORTTAXONOMY_READS } from '../../../../modules/krona/ktimporttaxonomy/main.nf'
include { KRONA_KTIMPORTTAXONOMY as KRONA_KTIMPORTTAXONOMY_REPORT } from '../../../../modules/krona/ktimporttaxonomy/main.nf'

workflow test_krona_ktimporttaxonomy {
workflow test_krona_ktimporttaxonomy_reads {

    input = [
        [ id:'test', single_end:false ], // meta map
        file(params.test_data['generic']['txt']['hello'], checkIfExists: true)
        file(params.test_data['sarscov2']['metagenome']['classified_reads_assignment'], checkIfExists: true)
    ]
    taxonomy = file(params.test_data['generic']['txt']['hello'], checkIfExists: true)
    taxonomy = file(params.test_data['sarscov2']['metagenome']['krona_taxonomy'], checkIfExists: true)

    KRONA_KTIMPORTTAXONOMY ( input, taxonomy )
    KRONA_KTIMPORTTAXONOMY_READS ( input, taxonomy )
}

workflow test_krona_ktimporttaxonomy_report {

    input = [
        [ id:'test', single_end:false ], // meta map
        file(params.test_data['sarscov2']['metagenome']['kraken_report'], checkIfExists: true)
    ]
    taxonomy = file(params.test_data['sarscov2']['metagenome']['krona_taxonomy'], checkIfExists: true)

    KRONA_KTIMPORTTAXONOMY_REPORT ( input, taxonomy )
}
@ -2,4 +2,12 @@ process {

    publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }

    withName: KRONA_KTIMPORTTAXONOMY_READS {
        ext.args = '-t 3'
    }

    withName: KRONA_KTIMPORTTAXONOMY_REPORT {
        ext.args = '-m 3 -t 5'
    }

}
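Those ext.args values are KronaTools column selectors: -t names the column holding the taxonomy ID and -m the magnitude (count) column, which is why the per-read mode and the report mode differ. Sketched against the two test fixtures (flag meanings from ktImportTaxonomy's documented interface; the output name is taken from the test expectations below):

    // reads mode: per-read classifications, taxonomy ID in column 3
    //     ktImportTaxonomy -t 3 -o taxonomy.krona.html classified_reads_assignment.txt
    // report mode: magnitude in column 3, taxonomy ID in column 5
    //     ktImportTaxonomy -m 3 -t 5 -o taxonomy.krona.html kraken_report.txt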
@ -1,9 +1,23 @@
- name: krona ktimporttaxonomy test_krona_ktimporttaxonomy
  command: nextflow run ./tests/modules/krona/ktimporttaxonomy -entry test_krona_ktimporttaxonomy -c ./tests/config/nextflow.config -c ./tests/modules/krona/ktimporttaxonomy/nextflow.config
- name: krona ktimporttaxonomy test_krona_ktimporttaxonomy_reads
  command: nextflow run tests/modules/krona/ktimporttaxonomy -entry test_krona_ktimporttaxonomy_reads -c tests/config/nextflow.config
  tags:
    - krona/ktimporttaxonomy
    - krona
    - krona/ktimporttaxonomy
  files:
    - path: output/krona/taxonomy.krona.html
      contains:
        - "DOCTYPE html PUBLIC"
    - path: output/krona/versions.yml
      md5sum: 660a8c151191bf4c63bd96db2c7fe503

- name: krona ktimporttaxonomy test_krona_ktimporttaxonomy_report
  command: nextflow run tests/modules/krona/ktimporttaxonomy -entry test_krona_ktimporttaxonomy_report -c tests/config/nextflow.config
  tags:
    - krona
    - krona/ktimporttaxonomy
  files:
    - path: output/krona/taxonomy.krona.html
      contains:
        - "DOCTYPE html PUBLIC"
    - path: output/krona/versions.yml
      md5sum: 8a593c16bb2d4132638fb0fc342fe2b7
9  tests/modules/krona/ktupdatetaxonomy/main.nf  Normal file
@ -0,0 +1,9 @@
#!/usr/bin/env nextflow

nextflow.enable.dsl = 2

include { KRONA_KTUPDATETAXONOMY } from '../../../../modules/krona/ktupdatetaxonomy/main.nf'

workflow test_krona_ktupdatetaxonomy {
    KRONA_KTUPDATETAXONOMY ( )
}
5  tests/modules/krona/ktupdatetaxonomy/nextflow.config  Normal file
@ -0,0 +1,5 @@
process {

    publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }

}
7  tests/modules/krona/ktupdatetaxonomy/test.yml  Normal file
@ -0,0 +1,7 @@
- name: krona ktupdatetaxonomy test_krona_ktupdatetaxonomy
  command: nextflow run ./tests/modules/krona/ktupdatetaxonomy -entry test_krona_ktupdatetaxonomy -c ./tests/config/nextflow.config -c ./tests/modules/krona/ktupdatetaxonomy/nextflow.config
  tags:
    - krona
    - krona/ktupdatetaxonomy
  files:
    - path: output/krona/taxonomy/taxonomy.tab
15  tests/modules/md5sum/main.nf  Normal file
@ -0,0 +1,15 @@
#!/usr/bin/env nextflow

nextflow.enable.dsl = 2

include { MD5SUM } from '../../../modules/md5sum/main.nf'

workflow test_md5sum {

    input = [
        [ id:'test', single_end:false ], // meta map
        file(params.test_data['sarscov2']['illumina']['test_paired_end_bam'], checkIfExists: true)
    ]

    MD5SUM ( input )
}
3  tests/modules/md5sum/nextflow.config  Normal file
@ -0,0 +1,3 @@
process {
    publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}
8  tests/modules/md5sum/test.yml  Normal file
@ -0,0 +1,8 @@
- name: md5sum test_md5sum
  command: nextflow run tests/modules/md5sum -entry test_md5sum -c tests/config/nextflow.config
  tags:
    - md5sum
  files:
    - path: output/md5sum/test.paired_end.bam.md5
      md5sum: 1163095be8fdfb2acb3cc6c027389c4b
    - path: output/md5sum/versions.yml
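The MD5SUM module wraps the coreutils command, writing the checksum alongside the input name. A minimal sketch of the presumed process (the module's real script is not part of this diff):

    process MD5SUM_SKETCH {
        input:
        tuple val(meta), path(input_file)

        output:
        tuple val(meta), path("${input_file}.md5")

        script:
        """
        md5sum $input_file > ${input_file}.md5
        """
    }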
12  tests/modules/motus/downloaddb/main.nf  Normal file
@ -0,0 +1,12 @@
#!/usr/bin/env nextflow

nextflow.enable.dsl = 2

include { MOTUS_DOWNLOADDB } from '../../../../modules/motus/downloaddb/main.nf'

workflow test_motus_downloaddb {

    input = file('https://raw.githubusercontent.com/motu-tool/mOTUs/master/motus/downloadDB.py')

    MOTUS_DOWNLOADDB ( input )
}
5  tests/modules/motus/downloaddb/nextflow.config  Normal file
@ -0,0 +1,5 @@
process {

    publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }

}
7  tests/modules/motus/downloaddb/test.yml  Normal file
@ -0,0 +1,7 @@
- name: motus downloaddb test_motus_downloaddb
  command: nextflow run tests/modules/motus/downloaddb -entry test_motus_downloaddb -c tests/config/nextflow.config
  tags:
    - motus
    - motus/downloaddb
  files:
    - path: output/motus/db_mOTU/db_mOTU_versions
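Passing downloadDB.py in by URL lets the test fetch the mOTUs setup script without a full tool checkout; the expected db_mOTU/db_mOTU_versions output suggests the process simply runs that script to populate db_mOTU/. A sketch under that assumption (not the module's actual script):

    process MOTUS_DOWNLOADDB_SKETCH {
        input:
        path downloaddb_script

        output:
        path "db_mOTU/"

        script:
        """
        python $downloaddb_script
        """
    }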
@ -1,14 +1,15 @@
- name: samtools bam2fq test_samtools_bam2fq_nosplit
  command: nextflow run ./tests/modules/samtools/bam2fq -entry test_samtools_bam2fq_nosplit -c ./tests/config/nextflow.config -c ./tests/modules/samtools/bam2fq/nextflow.config
  command: nextflow run tests/modules/samtools/bam2fq -entry test_samtools_bam2fq_nosplit -c tests/config/nextflow.config
  tags:
    - samtools/bam2fq
    - samtools
  files:
    - path: output/samtools/test_interleaved.fq.gz
      md5sum: d733e66d29a4b366bf9df8c42f845256
    - path: output/samtools/versions.yml
      md5sum: 4973eac1b6a8f090d5fcd4456d65a894

- name: samtools bam2fq test_samtools_bam2fq_withsplit
  command: nextflow run ./tests/modules/samtools/bam2fq -entry test_samtools_bam2fq_withsplit -c ./tests/config/nextflow.config -c ./tests/modules/samtools/bam2fq/nextflow.config
  command: nextflow run tests/modules/samtools/bam2fq -entry test_samtools_bam2fq_withsplit -c tests/config/nextflow.config
  tags:
    - samtools/bam2fq
    - samtools
@ -21,3 +22,5 @@
      md5sum: 709872fc2910431b1e8b7074bfe38c67
    - path: output/samtools/test_singleton.fq.gz
      md5sum: 709872fc2910431b1e8b7074bfe38c67
    - path: output/samtools/versions.yml
      md5sum: e92d21bbcda2fed7cb438d95c51edff0
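For reference on the two entries above: with splitting, samtools bam2fq writes separate read1/read2/other/singleton files, while without it everything goes to one interleaved stream. A sketch using bam2fq's documented flags (the prefix and channel shapes are assumptions, not the module's script):

    process BAM2FQ_SKETCH {
        input:
        tuple val(meta), path(bam)
        val split

        output:
        tuple val(meta), path("*.fq.gz")

        script:
        def prefix = meta.id
        if (split) {
            // .gz suffixes make bam2fq compress each per-class output
            """
            samtools bam2fq \\
                -1 ${prefix}_1.fq.gz \\
                -2 ${prefix}_2.fq.gz \\
                -0 ${prefix}_other.fq.gz \\
                -s ${prefix}_singleton.fq.gz \\
                $bam
            """
        } else {
            """
            samtools bam2fq $bam | gzip > ${prefix}_interleaved.fq.gz
            """
        }
    }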
15  tests/modules/shasum/main.nf  Normal file
@ -0,0 +1,15 @@
#!/usr/bin/env nextflow

nextflow.enable.dsl = 2

include { SHASUM } from '../../../modules/shasum/main.nf'

workflow test_shasum {

    input = [
        [ id:'test', single_end:false ], // meta map
        file(params.test_data['sarscov2']['illumina']['test_paired_end_bam'], checkIfExists: true)
    ]

    SHASUM ( input )
}
5  tests/modules/shasum/nextflow.config  Normal file
@ -0,0 +1,5 @@
process {

    publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }

}
8  tests/modules/shasum/test.yml  Normal file
@ -0,0 +1,8 @@
- name: shasum test_shasum
  command: nextflow run tests/modules/shasum -entry test_shasum -c tests/config/nextflow.config
  tags:
    - shasum
  files:
    - path: output/shasum/test.paired_end.bam.sha256
      md5sum: 138a19e100f09fc975ea1b717da9b6dd
    - path: output/shasum/versions.yml
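SHASUM mirrors the MD5SUM module with SHA-256; the .sha256 output path suggests roughly the following (a sketch assuming coreutils sha256sum rather than the perl shasum wrapper):

    process SHASUM_SKETCH {
        input:
        tuple val(meta), path(input_file)

        output:
        tuple val(meta), path("${input_file}.sha256")

        script:
        """
        sha256sum $input_file > ${input_file}.sha256
        """
    }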
@ -2,7 +2,7 @@

nextflow.enable.dsl = 2

include { ANNOTATION_ENSEMBLVEP } from '../../../../subworkflows/nf-core/annotation_ensemblvep/main'
include { ANNOTATION_ENSEMBLVEP } from '../../../../../subworkflows/nf-core/annotation/ensemblvep/main'

workflow annotation_ensemblvep {
    input = [
@ -10,5 +10,5 @@ workflow annotation_ensemblvep {
        file(params.test_data['sarscov2']['illumina']['test_vcf'], checkIfExists: true)
    ]

    ANNOTATION_ENSEMBLVEP ( input, "WBcel235", "caenorhabditis_elegans", "104", [] )
    ANNOTATION_ENSEMBLVEP ( input, "WBcel235", "caenorhabditis_elegans", "104", [], [] )
}
@ -7,7 +7,7 @@ process {
        publishDir = [ enabled: false ]
    }

    withName: ANNOTATION_BGZIPTABIX {
    withName: TABIX_BGZIPTABIX {
        ext.prefix = { "${meta.id}_VEP.ann.vcf" }
    }
@ -2,7 +2,7 @@

nextflow.enable.dsl = 2

include { ANNOTATION_SNPEFF } from '../../../../subworkflows/nf-core/annotation_snpeff/main'
include { ANNOTATION_SNPEFF } from '../../../../../subworkflows/nf-core/annotation_snpeff/main'

workflow annotation_snpeff {
    input = [
@ -7,7 +7,7 @@ process {
        publishDir = [ enabled: false ]
    }

    withName: ANNOTATION_BGZIPTABIX {
    withName: TABIX_BGZIPTABIX {
        ext.prefix = { "${meta.id}_snpEff.ann.vcf" }
    }