Merge branch 'nf-core:master' into antismashlite

Commit 9cce4a00b2 by Jasmin F, 2022-05-11 12:03:30 +02:00 (committed by GitHub)
169 changed files with 3935 additions and 476 deletions

@ -2,15 +2,20 @@ process ARRIBA {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::arriba=2.1.0" : null)
conda (params.enable_conda ? "bioconda::arriba=2.2.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/arriba:2.1.0--h3198e80_1' :
'quay.io/biocontainers/arriba:2.1.0--h3198e80_1' }"
'https://depot.galaxyproject.org/singularity/arriba:2.2.1--hecb563c_2' :
'quay.io/biocontainers/arriba:2.2.1--hecb563c_2' }"
input:
tuple val(meta), path(bam)
path fasta
path gtf
path blacklist
path known_fusions
path structural_variants
path tags
path protein_domains
output:
tuple val(meta), path("*.fusions.tsv") , emit: fusions
@ -23,7 +28,12 @@ process ARRIBA {
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def blacklist = (args.contains('-b')) ? '' : '-f blacklist'
def blacklist = blacklist ? "-b $blacklist" : "-f blacklist"
def known_fusions = known_fusions ? "-k $known_fusions" : ""
def structural_variants = structural_variants ? "-d $structural_variants" : ""
def tags = tags ? "-t $tags" : ""
def protein_domains = protein_domains ? "-p $protein_domains" : ""
"""
arriba \\
-x $bam \\
@ -32,6 +42,10 @@ process ARRIBA {
-o ${prefix}.fusions.tsv \\
-O ${prefix}.fusions.discarded.tsv \\
$blacklist \\
$known_fusions \\
$structural_variants \\
$tags \\
$protein_domains \\
$args
cat <<-END_VERSIONS > versions.yml
@ -39,4 +53,14 @@ process ARRIBA {
arriba: \$(arriba -h | grep 'Version:' 2>&1 | sed 's/Version:\s//')
END_VERSIONS
"""
stub:
def prefix = task.ext.prefix ?: "${meta.id}"
"""
echo stub > ${prefix}.fusions.tsv
echo stub > ${prefix}.fusions.discarded.tsv
echo "${task.process}:" > versions.yml
echo ' arriba: 2.2.1' >> versions.yml
"""
}
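To illustrate the expanded input signature, here is a hypothetical DSL2 call; the include path and the ch_bam channel are assumptions, and the empty lists are the usual nf-core placeholder for optional file inputs that are not used:

include { ARRIBA } from './modules/arriba/main'

workflow {
    ARRIBA(
        ch_bam,      // tuple val(meta), path(bam)
        fasta,       // reference FASTA
        gtf,         // annotation GTF
        blacklist,   // optional; when empty, the script falls back to '-f blacklist'
        [],          // known_fusions not provided
        [],          // structural_variants not provided
        [],          // tags not provided
        []           // protein_domains not provided
    )
}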

@ -30,6 +30,26 @@ input:
type: file
description: Annotation GTF file
pattern: "*.{gtf}"
- blacklist:
type: file
description: Blacklist file
pattern: "*.{tsv}"
- known_fusions:
type: file
description: Known fusions file
pattern: "*.{tsv}"
- structural_variants:
type: file
description: Structural variants file
pattern: "*.{tsv}"
- tags:
type: file
description: Tags file
pattern: "*.{tsv}"
- protein_domains:
type: file
description: Protein domains file
pattern: "*.{gff3}"
output:
- meta:
@ -51,4 +71,4 @@ output:
pattern: "*.{fusions.discarded.tsv}"
authors:
- "@praveenraj2018"
- "@praveenraj2018,@rannick"

@ -29,6 +29,8 @@ process BOWTIE2_ALIGN {
def unaligned = save_unaligned ? "--un-gz ${prefix}.unmapped.fastq.gz" : ''
"""
INDEX=`find -L ./ -name "*.rev.1.bt2" | sed 's/.rev.1.bt2//'`
[ -z "\$INDEX" ] && INDEX=`find -L ./ -name "*.rev.1.bt2l" | sed 's/.rev.1.bt2l//'`
[ -z "\$INDEX" ] && echo "BT2 index files not found" 1>&2 && exit 1
bowtie2 \\
-x \$INDEX \\
-U $reads \\
@ -49,6 +51,8 @@ process BOWTIE2_ALIGN {
def unaligned = save_unaligned ? "--un-conc-gz ${prefix}.unmapped.fastq.gz" : ''
"""
INDEX=`find -L ./ -name "*.rev.1.bt2" | sed 's/.rev.1.bt2//'`
[ -z "\$INDEX" ] && INDEX=`find -L ./ -name "*.rev.1.bt2l" | sed 's/.rev.1.bt2l//'`
[ -z "\$INDEX" ] && echo "BT2 index files not found" 1>&2 && exit 1
bowtie2 \\
-x \$INDEX \\
-1 ${reads[0]} \\

modules/busco/main.nf (new file)

@ -0,0 +1,84 @@
process BUSCO {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::busco=5.3.2" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/busco:5.3.2--pyhdfd78af_0':
'quay.io/biocontainers/busco:5.3.2--pyhdfd78af_0' }"
input:
tuple val(meta), path('tmp_input/*')
each lineage // Required: lineage to check against, "auto" enables --auto-lineage instead
path busco_lineages_path // Recommended: path to busco lineages - downloads if not set
path config_file // Optional: busco configuration file
output:
tuple val(meta), path("*-busco.batch_summary.txt"), emit: batch_summary
tuple val(meta), path("short_summary.*.txt") , emit: short_summaries_txt, optional: true
tuple val(meta), path("short_summary.*.json") , emit: short_summaries_json, optional: true
tuple val(meta), path("*-busco") , emit: busco_dir
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}-${lineage}"
def busco_config = config_file ? "--config $config_file" : ''
def busco_lineage = lineage.equals('auto') ? '--auto-lineage' : "--lineage_dataset ${lineage}"
def busco_lineage_dir = busco_lineages_path ? "--offline --download_path ${busco_lineages_path}" : ''
"""
# Nextflow changes the container --entrypoint to /bin/bash (container default entrypoint: /usr/local/env-execute)
# Check for container variable initialisation script and source it.
if [ -f "/usr/local/env-activate.sh" ]; then
set +u # Otherwise, errors out because of various unbound variables
. "/usr/local/env-activate.sh"
set -u
fi
# If the augustus config directory is not writable, then copy to writeable area
if [ ! -w "\${AUGUSTUS_CONFIG_PATH}" ]; then
# Create writable tmp directory for augustus
AUG_CONF_DIR=\$( mktemp -d -p \$PWD )
cp -r \$AUGUSTUS_CONFIG_PATH/* \$AUG_CONF_DIR
export AUGUSTUS_CONFIG_PATH=\$AUG_CONF_DIR
echo "New AUGUSTUS_CONFIG_PATH=\${AUGUSTUS_CONFIG_PATH}"
fi
# Ensure the input is uncompressed
INPUT_SEQS=input_seqs
mkdir "\$INPUT_SEQS"
cd "\$INPUT_SEQS"
for FASTA in ../tmp_input/*; do
if [ "\${FASTA##*.}" == 'gz' ]; then
gzip -cdf "\$FASTA" > \$( basename "\$FASTA" .gz )
else
ln -s "\$FASTA" .
fi
done
cd ..
busco \\
--cpu $task.cpus \\
--in "\$INPUT_SEQS" \\
--out ${prefix}-busco \\
$busco_lineage \\
$busco_lineage_dir \\
$busco_config \\
$args
# clean up
rm -rf "\$INPUT_SEQS"
# Move files to avoid staging/publishing issues
mv ${prefix}-busco/batch_summary.txt ${prefix}-busco.batch_summary.txt
mv ${prefix}-busco/*/short_summary.*.{json,txt} . || echo "Short summaries were not available: No genes were found."
cat <<-END_VERSIONS > versions.yml
"${task.process}":
busco: \$( busco --version 2>&1 | sed 's/^BUSCO //' )
END_VERSIONS
"""
}
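A minimal usage sketch for the new module, assuming a ch_assemblies channel of [ meta, fasta ] tuples; the lineage value and the empty placeholders follow the input comments above:

include { BUSCO } from './modules/busco/main'

workflow {
    BUSCO(
        ch_assemblies,     // tuple val(meta), path('tmp_input/*')
        'bacteria_odb10',  // or 'auto' to let BUSCO pick via --auto-lineage
        [],                // no local lineages path: BUSCO downloads what it needs
        []                 // no custom config file
    )
}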

modules/busco/meta.yml (new file)

@ -0,0 +1,69 @@
name: busco
description: Benchmarking Universal Single Copy Orthologs
keywords:
- quality control
- genome
- transcriptome
- proteome
tools:
- busco:
description: BUSCO provides measures for quantitative assessment of genome assembly, gene set, and transcriptome completeness based on evolutionarily informed expectations of gene content from near-universal single-copy orthologs selected from OrthoDB.
homepage: https://busco.ezlab.org/
documentation: https://busco.ezlab.org/busco_userguide.html
tool_dev_url: https://gitlab.com/ezlab/busco
doi: "10.1007/978-1-4939-9173-0_14"
licence: ["MIT"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- fasta:
type: file
description: Nucleic or amino acid sequence file in FASTA format.
pattern: "*.{fasta,fna,fa,fasta.gz,fna.gz,fa.gz}"
- lineage:
type: value
description: The BUSCO lineage to use, or "auto" to automatically select lineage
- busco_lineages_path:
type: directory
description: Path to local BUSCO lineages directory.
- config_file:
type: file
description: Path to BUSCO config file.
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- batch_summary:
type: file
description: Summary of all sequence files analyzed
pattern: "*-busco.batch_summary.txt"
- short_summaries_txt:
type: file
description: Short Busco summary in plain text format
pattern: "short_summary.*.txt"
- short_summaries_json:
type: file
description: Short Busco summary in JSON format
pattern: "short_summary.*.json"
- busco_dir:
type: directory
description: BUSCO lineage specific output
pattern: "*-busco"
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
authors:
- "@priyanka-surana"
- "@charles-plessy"
- "@mahesh-panchal"
- "@muffato"
- "@jvhagey"

@ -2,43 +2,42 @@ process CNVPYTOR_CALLCNVS {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::cnvpytor=1.0" : null)
conda (params.enable_conda ? "bioconda::cnvpytor=1.2.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/cnvpytor:1.0--py39h6a678da_2':
'quay.io/biocontainers/cnvpytor:1.0--py39h6a678da_2' }"
'https://depot.galaxyproject.org/singularity/cnvpytor:1.2.1--pyhdfd78af_0':
'quay.io/biocontainers/cnvpytor:1.2.1--pyhdfd78af_0' }"
input:
tuple val(meta), path(pytor)
val bin_sizes
output:
tuple val(meta), path("*.tsv"), emit: cnvs
path "versions.yml" , emit: versions
tuple val(meta), path("${pytor.baseName}.pytor") , emit: pytor
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: '1000'
def prefix = task.ext.prefix ?: "${meta.id}"
def bins = bin_sizes ?: '1000'
"""
cnvpytor \\
-root $pytor \\
-call $args > ${prefix}.tsv
-call $bins
cat <<-END_VERSIONS > versions.yml
"${task.process}":
cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/CNVpytor //')
END_VERSIONS
"""
stub:
def prefix = task.ext.prefix ?: "${meta.id}"
"""
touch ${prefix}.tsv
touch ${pytor.baseName}.pytor
cat <<-END_VERSIONS > versions.yml
"${task.process}":
cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/CNVpytor //')
END_VERSIONS
"""
}

@ -17,8 +17,11 @@ input:
e.g. [ id:'test']
- pytor:
type: file
description: cnvpytor root file
description: pytor file containing partitions of read depth histograms using mean-shift method
pattern: "*.{pytor}"
- bin_sizes:
type: string
description: list of bin sizes separated by spaces, e.g. "1000 10000" or "1000"
output:
- meta:
@ -26,10 +29,10 @@ output:
description: |
Groovy Map containing sample information
e.g. [ id:'test' ]
- cnvs:
- pytor:
type: file
description: file containing identified copy numer variations
pattern: "*.{tsv}"
description: pytor files containing cnv calls
pattern: "*.{pytor}"
- versions:
type: file
description: File containing software versions

@ -2,13 +2,15 @@ process CNVPYTOR_HISTOGRAM {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::cnvpytor=1.0" : null)
conda (params.enable_conda ? "bioconda::cnvpytor=1.2.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/cnvpytor:1.0--py39h6a678da_2':
'quay.io/biocontainers/cnvpytor:1.0--py39h6a678da_2' }"
'https://depot.galaxyproject.org/singularity/cnvpytor:1.2.1--pyhdfd78af_0':
'quay.io/biocontainers/cnvpytor:1.2.1--pyhdfd78af_0' }"
input:
tuple val(meta), path(pytor)
val bin_sizes
output:
tuple val(meta), path("${pytor.baseName}.pytor") , emit: pytor
@ -18,15 +20,15 @@ process CNVPYTOR_HISTOGRAM {
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: '1000'
def bins = bin_sizes ?: '1000'
"""
cnvpytor \\
-root $pytor \\
-his $args
-his $bins
cat <<-END_VERSIONS > versions.yml
"${task.process}":
cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/CNVpytor //')
END_VERSIONS
"""
@ -36,7 +38,7 @@ process CNVPYTOR_HISTOGRAM {
cat <<-END_VERSIONS > versions.yml
"${task.process}":
cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/CNVpytor //')
END_VERSIONS
"""
}

@ -22,6 +22,9 @@ input:
type: file
description: pytor file containing read depth data
pattern: "*.{pytor}"
- bin_sizes:
type: string
description: list of bin sizes separated by spaces, e.g. "1000 10000" or "1000"
output:
- meta:
@ -40,3 +43,4 @@ output:
authors:
- "@sima-r"
- "@ramprasadn"

@ -2,10 +2,10 @@ process CNVPYTOR_IMPORTREADDEPTH {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::cnvpytor=1.0" : null)
conda (params.enable_conda ? "bioconda::cnvpytor=1.2.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/cnvpytor:1.0--py39h6a678da_2':
'quay.io/biocontainers/cnvpytor:1.0--py39h6a678da_2' }"
'https://depot.galaxyproject.org/singularity/cnvpytor:1.2.1--pyhdfd78af_0':
'quay.io/biocontainers/cnvpytor:1.2.1--pyhdfd78af_0' }"
input:
tuple val(meta), path(input_file), path(index)
@ -32,7 +32,7 @@ process CNVPYTOR_IMPORTREADDEPTH {
cat <<-END_VERSIONS > versions.yml
"${task.process}":
cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/CNVpytor //')
END_VERSIONS
"""
@ -43,7 +43,7 @@ process CNVPYTOR_IMPORTREADDEPTH {
cat <<-END_VERSIONS > versions.yml
"${task.process}":
cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/CNVpytor //')
END_VERSIONS
"""
}

@ -52,3 +52,4 @@ output:
authors:
- "@sima-r"
- "@ramprasadn"

@ -2,13 +2,14 @@ process CNVPYTOR_PARTITION {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::cnvpytor=1.0" : null)
conda (params.enable_conda ? "bioconda::cnvpytor=1.2.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/cnvpytor:1.0--py39h6a678da_2':
'quay.io/biocontainers/cnvpytor:1.0--py39h6a678da_2' }"
'https://depot.galaxyproject.org/singularity/cnvpytor:1.2.1--pyhdfd78af_0':
'quay.io/biocontainers/cnvpytor:1.2.1--pyhdfd78af_0' }"
input:
tuple val(meta), path(pytor)
val bin_sizes
output:
tuple val(meta), path("${pytor.baseName}.pytor"), emit: pytor
@ -18,15 +19,15 @@ process CNVPYTOR_PARTITION {
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def bins = bin_sizes ?: '1000'
"""
cnvpytor \\
-root $pytor \\
-partition $args
-partition $bins
cat <<-END_VERSIONS > versions.yml
"${task.process}":
cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/CNVpytor //')
END_VERSIONS
"""
@ -36,7 +37,7 @@ process CNVPYTOR_PARTITION {
cat <<-END_VERSIONS > versions.yml
"${task.process}":
cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/CNVpytor //')
END_VERSIONS
"""
}

@ -22,6 +22,9 @@ input:
type: file
description: pytor file containing read depth data
pattern: "*.{pytor}"
- bin_sizes:
type: string
description: list of bin sizes separated by spaces, e.g. "1000 10000" or "1000"
output:
- meta:
@ -40,3 +43,4 @@ output:
authors:
- "@sima-r"
- "@ramprasadn"

@ -0,0 +1,60 @@
process CNVPYTOR_VIEW {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::cnvpytor=1.2.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/cnvpytor:1.2.1--pyhdfd78af_0':
'quay.io/biocontainers/cnvpytor:1.2.1--pyhdfd78af_0' }"
input:
tuple val(meta), path(pytor_files)
val bin_sizes
val output_format
output:
tuple val(meta), path("*.vcf"), emit: vcf , optional: true
tuple val(meta), path("*.tsv"), emit: tsv , optional: true
tuple val(meta), path("*.xls"), emit: xls , optional: true
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def output_suffix = output_format ?: 'vcf'
def bins = bin_sizes ?: '1000'
def input = pytor_files.join(" ")
def prefix = task.ext.prefix ?: "${meta.id}"
"""
python3 <<CODE
import cnvpytor,os
binsizes = "${bins}".split(" ")
for binsize in binsizes:
file_list = "${input}".split(" ")
app = cnvpytor.Viewer(file_list, params={} )
outputfile = "{}_{}.{}".format("${prefix}",binsize.strip(),"${output_suffix}")
app.print_filename = outputfile
app.bin_size = int(binsize)
app.print_calls_file()
CODE
cat <<-END_VERSIONS > versions.yml
"${task.process}":
cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/CNVpytor //')
END_VERSIONS
"""
stub:
def output_suffix = output_format ?: 'vcf'
def prefix = task.ext.prefix ?: "${meta.id}"
"""
touch ${prefix}.${output_suffix}
cat <<-END_VERSIONS > versions.yml
"${task.process}":
cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/CNVpytor //')
END_VERSIONS
"""
}
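Since all four CNVpytor modules now accept the same bin_sizes value, one setting can be threaded through the whole chain; a sketch, assuming a ch_pytor channel of [ meta, pytor ] tuples produced by CNVPYTOR_IMPORTREADDEPTH:

include { CNVPYTOR_HISTOGRAM } from './modules/cnvpytor/histogram/main'
include { CNVPYTOR_PARTITION } from './modules/cnvpytor/partition/main'
include { CNVPYTOR_CALLCNVS  } from './modules/cnvpytor/callcnvs/main'
include { CNVPYTOR_VIEW      } from './modules/cnvpytor/view/main'

workflow {
    bins = '1000 10000'
    CNVPYTOR_HISTOGRAM(ch_pytor, bins)
    CNVPYTOR_PARTITION(CNVPYTOR_HISTOGRAM.out.pytor, bins)
    CNVPYTOR_CALLCNVS(CNVPYTOR_PARTITION.out.pytor, bins)
    CNVPYTOR_VIEW(CNVPYTOR_CALLCNVS.out.pytor, bins, 'vcf')
}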

@ -0,0 +1,56 @@
name: cnvpytor_view
description: view function to generate VCF, TSV, or XLS files of CNV calls
keywords:
- cnv calling
tools:
- cnvpytor:
description: calling CNVs using read depth
homepage: https://github.com/abyzovlab/CNVpytor
documentation: https://github.com/abyzovlab/CNVpytor
tool_dev_url: https://github.com/abyzovlab/CNVpytor
doi: "10.1101/2021.01.27.428472v1"
licence: ["MIT"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test' ]
- pytor_files:
type: file
description: pytor file containing cnv calls. To merge calls from multiple samples use a list of files.
pattern: "*.{pytor}"
- bin_sizes:
type: string
description: list of bin sizes separated by spaces, e.g. "1000 10000" or "1000"
- output_format:
type: string
description: output format of the cnv calls. Valid entries are "tsv", "vcf", and "xls"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test' ]
- tsv:
type: file
description: tsv file containing cnv calls
pattern: "*.{tsv}"
- vcf:
type: file
description: vcf file containing cnv calls
pattern: "*.{vcf}"
- xls:
type: file
description: xls file containing cnv calls
pattern: "*.{xls}"
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
authors:
- "@sima-r"
- "@ramprasadn"

@ -8,13 +8,14 @@ LABEL \
COPY environment.yml /
RUN conda env create -f /environment.yml && conda clean -a
# Add conda installation dir to PATH (instead of doing 'conda activate')
ENV PATH /opt/conda/envs/nf-core-vep-104.3/bin:$PATH
# Setup default ARG variables
ARG GENOME=GRCh38
ARG SPECIES=homo_sapiens
ARG VEP_VERSION=99
ARG VEP_VERSION=104
ARG VEP_TAG=104.3
# Add conda installation dir to PATH (instead of doing 'conda activate')
ENV PATH /opt/conda/envs/nf-core-vep-${VEP_TAG}/bin:$PATH
# Download Genome
RUN vep_install \
@ -27,4 +28,4 @@ RUN vep_install \
--NO_BIOPERL --NO_HTSLIB --NO_TEST --NO_UPDATE
# Dump the details of the installed packages to a file for posterity
RUN conda env export --name nf-core-vep-104.3 > nf-core-vep-104.3.yml
RUN conda env export --name nf-core-vep-${VEP_TAG} > nf-core-vep-${VEP_TAG}.yml

@ -10,11 +10,12 @@ build_push() {
VEP_TAG=$4
docker build \
. \
-t nfcore/vep:${VEP_TAG}.${GENOME} \
software/vep/. \
--build-arg GENOME=${GENOME} \
--build-arg SPECIES=${SPECIES} \
--build-arg VEP_VERSION=${VEP_VERSION}
--build-arg VEP_VERSION=${VEP_VERSION} \
--build-arg VEP_TAG=${VEP_TAG}
docker push nfcore/vep:${VEP_TAG}.${GENOME}
}
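For reference, the updated helper would now be invoked with the container tag as a fourth argument, e.g. (illustrative values, taken from the Dockerfile's ARG defaults):

build_push "GRCh38" "homo_sapiens" "104" "104.3"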

@ -13,6 +13,7 @@ process ENSEMBLVEP {
val species
val cache_version
path cache
path extra_files
output:
tuple val(meta), path("*.ann.vcf"), emit: vcf

@ -10,17 +10,6 @@ tools:
homepage: https://www.ensembl.org/info/docs/tools/vep/index.html
documentation: https://www.ensembl.org/info/docs/tools/vep/script/index.html
licence: ["Apache-2.0"]
params:
- use_cache:
type: boolean
description: |
Enable the usage of containers with cache
Does not work with conda
- vep_tag:
type: value
description: |
Specify the tag for the container
https://hub.docker.com/r/nfcore/vep/tags
input:
- meta:
type: map
@ -47,6 +36,10 @@ input:
type: file
description: |
path to VEP cache (optional)
- extra_files:
type: tuple
description: |
path to file(s) needed for plugins (optional)
output:
- vcf:
type: file

@ -43,4 +43,15 @@ process GATK4_MERGEBAMALIGNMENT {
gatk4: \$(echo \$(gatk --version 2>&1) | sed 's/^.*(GATK) v//; s/ .*\$//')
END_VERSIONS
"""
stub:
def prefix = task.ext.prefix ?: "${meta.id}"
"""
touch ${prefix}.bam
cat <<-END_VERSIONS > versions.yml
"${task.process}":
gatk4: \$(echo \$(gatk --version 2>&1) | sed 's/^.*(GATK) v//; s/ .*\$//')
END_VERSIONS
"""
}

@ -57,4 +57,18 @@ process GATK4_MUTECT2 {
gatk4: \$(echo \$(gatk --version 2>&1) | sed 's/^.*(GATK) v//; s/ .*\$//')
END_VERSIONS
"""
stub:
def prefix = task.ext.prefix ?: "${meta.id}"
"""
touch ${prefix}.vcf.gz
touch ${prefix}.vcf.gz.tbi
touch ${prefix}.vcf.gz.stats
touch ${prefix}.f1r2.tar.gz
cat <<-END_VERSIONS > versions.yml
"${task.process}":
gatk4: \$(echo \$(gatk --version 2>&1) | sed 's/^.*(GATK) v//; s/ .*\$//')
END_VERSIONS
"""
}

@ -39,4 +39,15 @@ process GATK4_REVERTSAM {
gatk4: \$(echo \$(gatk --version 2>&1) | sed 's/^.*(GATK) v//; s/ .*\$//')
END_VERSIONS
"""
stub:
def prefix = task.ext.prefix ?: "${meta.id}"
"""
touch ${prefix}.reverted.bam
cat <<-END_VERSIONS > versions.yml
"${task.process}":
gatk4: \$(echo \$(gatk --version 2>&1) | sed 's/^.*(GATK) v//; s/ .*\$//')
END_VERSIONS
"""
}

@ -40,4 +40,17 @@ process GATK4_SAMTOFASTQ {
gatk4: \$(echo \$(gatk --version 2>&1) | sed 's/^.*(GATK) v//; s/ .*\$//')
END_VERSIONS
"""
stub:
def prefix = task.ext.prefix ?: "${meta.id}"
"""
touch ${prefix}.fastq.gz
touch ${prefix}_1.fastq.gz
touch ${prefix}_2.fastq.gz
cat <<-END_VERSIONS > versions.yml
"${task.process}":
gatk4: \$(echo \$(gatk --version 2>&1) | sed 's/^.*(GATK) v//; s/ .*\$//')
END_VERSIONS
"""
}

@ -0,0 +1,42 @@
def VERSION = '0.3.14'
process HAPPY_HAPPY {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::hap.py=0.3.14" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/hap.py:0.3.14--py27h5c5a3ab_0':
'quay.io/biocontainers/hap.py:0.3.14--py27h5c5a3ab_0' }"
input:
tuple val(meta), path(truth_vcf), path(query_vcf), path(bed)
tuple path(fasta), path(fasta_fai)
output:
tuple val(meta), path('*.csv'), path('*.json') , emit: metrics
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
hap.py \\
$truth_vcf \\
$query_vcf \\
$args \\
--reference $fasta \\
--threads $task.cpus \\
-R $bed \\
-o $prefix
cat <<-END_VERSIONS > versions.yml
"${task.process}":
hap.py: $VERSION
END_VERSIONS
"""
}

@ -0,0 +1,67 @@
name: "happy_happy"
description: Hap.py is a tool to compare diploid genotypes at haplotype level. Rather than comparing VCF records row by row, hap.py will generate and match alternate sequences in a superlocus. A superlocus is a small region of the genome (sized between 1 and around 1000 bp) that contains one or more variants.
keywords:
- happy
- benchmark
- haplotype
tools:
- "happy":
description: "Haplotype VCF comparison tools"
homepage: "https://www.illumina.com/products/by-type/informatics-products/basespace-sequence-hub/apps/hap-py-benchmarking.html"
documentation: "https://github.com/Illumina/hap.py"
tool_dev_url: "https://github.com/Illumina/hap.py"
doi: ""
licence: "['BSD-2-clause']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- truth_vcf:
type: file
description: gold standard VCF file
pattern: "*.{vcf,vcf.gz}"
- query_vcf:
type: file
description: VCF/GVCF file to query
pattern: "*.{vcf,vcf.gz}"
- bed:
type: file
description: BED file
pattern: "*.bed"
- fasta:
type: file
description: FASTA file of the reference genome
pattern: "*.{fa,fasta}"
- fasta_fai:
type: file
description: The index of the reference FASTA
pattern: "*.fai"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- summary:
type: file
description: A CSV file containing the summary of the benchmarking
pattern: "*.summary.csv"
- extended:
type: file
description: A CSV file containing extended info of the benchmarking
pattern: "*.extended.csv"
- runinfo:
type: file
description: A JSON file containing the run info
pattern: "*.runinfo.json"
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
authors:
- "@nvnieuwk"

@ -0,0 +1,41 @@
def VERSION = '0.3.14'
process HAPPY_PREPY {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::hap.py=0.3.14" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/hap.py:0.3.14--py27h5c5a3ab_0':
'quay.io/biocontainers/hap.py:0.3.14--py27h5c5a3ab_0' }"
input:
tuple val(meta), path(vcf), path(bed)
tuple path(fasta), path(fasta_fai)
output:
tuple val(meta), path('*.vcf.gz') , emit: preprocessed_vcf
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
pre.py \\
$args \\
-R $bed \\
--reference $fasta \\
--threads $task.cpus \\
$vcf \\
${prefix}.vcf.gz
cat <<-END_VERSIONS > versions.yml
"${task.process}":
pre.py: $VERSION
END_VERSIONS
"""
}

@ -0,0 +1,55 @@
name: "happy_prepy"
description: Pre.py is a preprocessing tool made to preprocess VCF files for Hap.py
keywords:
- happy
- benchmark
- haplotype
tools:
- "happy":
description: "Haplotype VCF comparison tools"
homepage: "https://www.illumina.com/products/by-type/informatics-products/basespace-sequence-hub/apps/hap-py-benchmarking.html"
documentation: "https://github.com/Illumina/hap.py"
tool_dev_url: "https://github.com/Illumina/hap.py"
doi: ""
licence: "['BSD-2-clause']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- vcf:
type: file
description: VCF file to preprocess
pattern: "*.{vcf,vcf.gz}"
- bed:
type: file
description: BED file
pattern: "*.bed"
- fasta:
type: file
description: FASTA file of the reference genome
pattern: "*.{fa,fasta}"
- fasta_fai:
type: file
description: The index of the reference FASTA
pattern: "*.fai"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- vcf:
type: file
description: A preprocessed VCF file
pattern: "*.vcf.gz"
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
authors:
- "@nvnieuwk"

@ -18,7 +18,9 @@ process KRONA_KRONADB {
script:
def args = task.ext.args ?: ''
"""
ktUpdateTaxonomy.sh taxonomy
ktUpdateTaxonomy.sh \\
$args \\
taxonomy/
cat <<-END_VERSIONS > versions.yml
"${task.process}":

@ -23,7 +23,10 @@ process KRONA_KTIMPORTTAXONOMY {
script:
def args = task.ext.args ?: ''
"""
ktImportTaxonomy "$report" -tax taxonomy
ktImportTaxonomy \\
$args \\
-tax taxonomy/ \\
"$report"
cat <<-END_VERSIONS > versions.yml
"${task.process}":

@ -23,8 +23,11 @@ input:
Groovy Map containing sample information
e.g. [ id:'test']
- database:
type: path
description: "Path to the taxonomy database downloaded by krona/kronadb"
type: file
description: |
Path to the taxonomy database .tab file downloaded by krona/ktUpdateTaxonomy
The file will be saved under a folder named "taxonomy" as "taxonomy/taxonomy.tab".
The parent folder will be passed as argument to ktImportTaxonomy.
- report:
type: file
description: "A tab-delimited file with taxonomy IDs and (optionally) query IDs, magnitudes, and scores. Query IDs are taken from column 1, taxonomy IDs from column 2, and scores from column 3. Lines beginning with # will be ignored."

@ -0,0 +1,30 @@
def VERSION='2.7.1' // Version information not provided by tool on CLI
process KRONA_KTUPDATETAXONOMY {
label 'process_low'
conda (params.enable_conda ? "bioconda::krona=2.7.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/krona:2.7.1--pl526_5' :
'quay.io/biocontainers/krona:2.7.1--pl526_5' }"
output:
path 'taxonomy/taxonomy.tab', emit: db
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
"""
ktUpdateTaxonomy.sh \\
$args \\
taxonomy/
cat <<-END_VERSIONS > versions.yml
"${task.process}":
krona: $VERSION
END_VERSIONS
"""
}

@ -0,0 +1,31 @@
name: krona_ktupdatetaxonomy
description: KronaTools Update Taxonomy downloads a taxonomy database
keywords:
- database
- taxonomy
- krona
- visualisation
tools:
- krona:
description: Krona Tools is a set of scripts to create Krona charts from several Bioinformatics tools as well as from text and XML files.
homepage: https://github.com/marbl/Krona/wiki/KronaTools
documentation: https://github.com/marbl/Krona/wiki/Installing
tool_dev_url:
doi: https://doi.org/10.1186/1471-2105-12-385
licence:
input:
- none: There is no input. This module downloads a pre-built taxonomy database for use with Krona Tools.
output:
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- db:
type: file
description: A TAB separated file that contains a taxonomy database.
pattern: "*.{tab}"
authors:
- "@mjakobs"

modules/md5sum/main.nf (new file)

@ -0,0 +1,35 @@
process MD5SUM {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "conda-forge::coreutils=9.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/ubuntu:20.04' :
'ubuntu:20.04' }"
input:
tuple val(meta), path(file)
output:
tuple val(meta), path("*.md5"), emit: checksum
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
md5sum \\
$args \\
${file} \\
> ${file}.md5
cat <<-END_VERSIONS > versions.yml
"${task.process}":
md5sum: \$(echo \$(md5sum --version 2>&1 | head -n 1| sed 's/^.*) //;' ))
END_VERSIONS
"""
}
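As a usage note, the emitted file follows the standard coreutils checksum format, so it can be verified later with the -c flag (file name illustrative):

md5sum -c test.fastq.md5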

modules/md5sum/meta.yml (new file)

@ -0,0 +1,39 @@
name: "md5sum"
description: Create an MD5 (128-bit) checksum
keywords:
- checksum
tools:
- "md5sum":
description: Create an MD5 (128-bit) checksum
homepage: "https://www.gnu.org"
documentation: "https://man7.org/linux/man-pages/man1/md5sum.1.html"
licence: GPLv3+
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- file:
type: file
description: Any file
pattern: "*.*"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- checksum:
type: file
description: File containing checksum
pattern: "*.md5"
authors:
- "@matthdsm"

@ -0,0 +1,37 @@
process MERYL_COUNT {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::meryl=1.3" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/meryl:1.3--h87f3376_1':
'quay.io/biocontainers/meryl:1.3--h87f3376_1' }"
input:
tuple val(meta), path(reads)
output:
tuple val(meta), path("*.meryldb"), emit: meryl_db
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
for READ in $reads; do
meryl count \\
threads=$task.cpus \\
$args \\
\$READ \\
output read.\${READ%.f*}.meryldb
done
cat <<-END_VERSIONS > versions.yml
"${task.process}":
meryl: \$( meryl --version |& sed 's/meryl //' )
END_VERSIONS
"""
}

@ -0,0 +1,43 @@
name: "meryl_count"
description: A genomic k-mer counter (and sequence utility) with nice features.
keywords:
- k-mer
- count
tools:
- "meryl":
description: "A genomic k-mer counter (and sequence utility) with nice features. "
homepage: "https://github.com/marbl/meryl"
documentation: "https://meryl.readthedocs.io/en/latest/quick-start.html"
tool_dev_url: "https://github.com/marbl/meryl"
doi: ""
licence: "['GPL']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- reads:
type: file
description: |
List of input FastQ files of size 1 and 2 for single-end and paired-end data,
respectively.
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- meryl_db:
type: directory
description: A Meryl k-mer database
pattern: "*.meryldb"
authors:
- "@mahesh-panchal"

@ -0,0 +1,34 @@
process MERYL_HISTOGRAM {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::meryl=1.3" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/meryl:1.3--h87f3376_1':
'quay.io/biocontainers/meryl:1.3--h87f3376_1' }"
input:
tuple val(meta), path(meryl_db)
output:
tuple val(meta), path("*.hist"), emit: hist
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
meryl histogram \\
threads=$task.cpus \\
$args \\
$meryl_db > ${prefix}.hist
cat <<-END_VERSIONS > versions.yml
"${task.process}":
meryl: \$( meryl --version |& sed 's/meryl //' )
END_VERSIONS
"""
}

@ -0,0 +1,41 @@
name: "meryl_histogram"
description: A genomic k-mer counter (and sequence utility) with nice features.
keywords:
- k-mer
- histogram
tools:
- "meryl":
description: "A genomic k-mer counter (and sequence utility) with nice features. "
homepage: "https://github.com/marbl/meryl"
documentation: "https://meryl.readthedocs.io/en/latest/quick-start.html"
tool_dev_url: "https://github.com/marbl/meryl"
doi: ""
licence: "['GPL']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- meryl_db:
type: directory
description: Meryl k-mer database
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- hist:
type: file
description: Histogram of k-mers
pattern: "*.hist"
authors:
- "@mahesh-panchal"

@ -0,0 +1,35 @@
process MERYL_UNIONSUM {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::meryl=1.3" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/meryl:1.3--h87f3376_1':
'quay.io/biocontainers/meryl:1.3--h87f3376_1' }"
input:
tuple val(meta), path(meryl_dbs)
output:
tuple val(meta), path("*.unionsum.meryldb"), emit: meryl_db
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
meryl union-sum \\
threads=$task.cpus \\
$args \\
output ${prefix}.unionsum.meryldb \\
$meryl_dbs
cat <<-END_VERSIONS > versions.yml
"${task.process}":
meryl: \$( meryl --version |& sed 's/meryl //' )
END_VERSIONS
"""
}
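A sketch chaining the three new meryl modules (channel name assumed): MERYL_COUNT builds one database per read file, MERYL_UNIONSUM merges them, and MERYL_HISTOGRAM summarises the merged database:

include { MERYL_COUNT     } from './modules/meryl/count/main'
include { MERYL_UNIONSUM  } from './modules/meryl/unionsum/main'
include { MERYL_HISTOGRAM } from './modules/meryl/histogram/main'

workflow {
    MERYL_COUNT(ch_reads)   // [ meta, reads ]
    MERYL_UNIONSUM(MERYL_COUNT.out.meryl_db)
    MERYL_HISTOGRAM(MERYL_UNIONSUM.out.meryl_db)
}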

@ -0,0 +1,41 @@
name: "meryl_unionsum"
description: A genomic k-mer counter (and sequence utility) with nice features.
keywords:
- k-mer
- unionsum
tools:
- "meryl":
description: "A genomic k-mer counter (and sequence utility) with nice features. "
homepage: "https://github.com/marbl/meryl"
documentation: "https://meryl.readthedocs.io/en/latest/quick-start.html"
tool_dev_url: "https://github.com/marbl/meryl"
doi: ""
licence: "['GPL']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- meryl_dbs:
type: directory
description: Meryl k-mer databases
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- meryl_db:
type: directory
description: A Meryl k-mer database that is the union sum of the input databases
pattern: "*.unionsum.meryldb"
authors:
- "@mahesh-panchal"

@ -23,7 +23,7 @@ process METAPHLAN3 {
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def input_type = ("$input".endsWith(".fastq.gz")) ? "--input_type fastq" : ("$input".contains(".fasta")) ? "--input_type fasta" : ("$input".endsWith(".bowtie2out.txt")) ? "--input_type bowtie2out" : "--input_type sam"
def input_type = ("$input".endsWith(".fastq.gz") || "$input".endsWith(".fq.gz")) ? "--input_type fastq" : ("$input".contains(".fasta")) ? "--input_type fasta" : ("$input".endsWith(".bowtie2out.txt")) ? "--input_type bowtie2out" : "--input_type sam"
def input_data = ("$input_type".contains("fastq")) && !meta.single_end ? "${input[0]},${input[1]}" : "$input"
def bowtie2_out = "$input_type" == "--input_type bowtie2out" || "$input_type" == "--input_type sam" ? '' : "--bowtie2out ${prefix}.bowtie2out.txt"

@ -0,0 +1,39 @@
process MOTUS_DOWNLOADDB {
label 'process_low'
conda (params.enable_conda ? "bioconda::motus=3.0.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/motus:3.0.1--pyhdfd78af_0':
'quay.io/biocontainers/motus:3.0.1--pyhdfd78af_0' }"
input:
path motus_downloaddb_script
output:
path "db_mOTU/" , emit: db
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def software = "${motus_downloaddb_script.simpleName}_copy.py"
"""
## must copy the script file to the working directory,
## otherwise the reference_db will be downloaded to the bin folder
## instead of the current directory
cp $motus_downloaddb_script ${software}
python ${software} \\
$args \\
-t $task.cpus
## mOTUs version number is not available from the command line.
## mOTUs saves the version number in the index database folder.
## mOTUs checks that the database version matches the executable version.
cat <<-END_VERSIONS > versions.yml
"${task.process}":
mOTUs: \$(grep motus db_mOTU/db_mOTU_versions | sed 's/motus\\t//g')
END_VERSIONS
"""
}
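A usage sketch: the module takes the downloadDB.py script itself as its only input, e.g. fetched from the remote source listed in the meta.yml below:

include { MOTUS_DOWNLOADDB } from './modules/motus/downloaddb/main'

workflow {
    MOTUS_DOWNLOADDB(
        file('https://raw.githubusercontent.com/motu-tool/mOTUs/master/motus/downloadDB.py')
    )
    // MOTUS_DOWNLOADDB.out.db then emits the db_mOTU/ directory
}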

@ -0,0 +1,39 @@
name: "motus_downloaddb"
description: Download the mOTUs database
keywords:
- classify
- metagenomics
- fastq
- taxonomic profiling
- database
- download
tools:
- "motus":
description: "The mOTU profiler is a computational tool that estimates relative taxonomic abundance of known and currently unknown microbial community members using metagenomic shotgun sequencing data."
homepage: "None"
documentation: "https://github.com/motu-tool/mOTUs/wiki"
tool_dev_url: "https://github.com/motu-tool/mOTUs"
doi: "10.1038/s41467-019-08844-4"
licence: "['GPL v3']"
input:
- motus_downloaddb:
type: directory
description: |
The mOTUs downloadDB script source file,
either from a local installation or a remote
source such as https://raw.githubusercontent.com/motu-tool/mOTUs/master/motus/downloadDB.py
pattern: "downloadDB.py"
output:
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- db:
type: directory
description: The mOTUs database directory
pattern: "db_mOTU"
authors:
- "@jianhong"

@ -0,0 +1,59 @@
process RTGTOOLS_VCFEVAL {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::rtg-tools=3.12.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/rtg-tools:3.12.1--hdfd78af_0':
'quay.io/biocontainers/rtg-tools:3.12.1--hdfd78af_0' }"
input:
tuple val(meta), path(query_vcf), path(query_vcf_tbi)
tuple path(truth_vcf), path(truth_vcf_tbi)
path(truth_regions)
path(evaluation_regions)
path(sdf)
output:
tuple val(meta), path("**results/{done,progress,*.log}") , emit: logs
tuple val(meta), path("**tp.vcf.gz"), path("**tp.vcf.gz.tbi") , emit: tp
tuple val(meta), path("**fn.vcf.gz"), path("**fn.vcf.gz.tbi") , emit: fn
tuple val(meta), path("**fp.vcf.gz"), path("**fp.vcf.gz.tbi") , emit: fp
tuple val(meta), path("**baseline.vcf.gz"), path("**baseline.vcf.gz.tbi") , emit: baseline
tuple val(meta), path("**.tsv.gz") , emit: roc
tuple val(meta), path("**results/summary.txt") , emit: summary
tuple val(meta), path("**results/phasing.txt") , emit: phasing
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ""
def prefix = task.ext.prefix ?: "${meta.id}"
def bed_regions = truth_regions ? "--bed-regions=$truth_regions" : ""
def eval_regions = evaluation_regions ? "--evaluation-regions=$evaluation_regions" : ""
def truth_index = truth_vcf_tbi ? "" : "rtg index $truth_vcf"
def query_index = query_vcf_tbi ? "" : "rtg index $query_vcf"
"""
$truth_index
$query_index
rtg vcfeval \\
$args \\
--baseline=$truth_vcf \\
$bed_regions \\
$eval_regions \\
--calls=$query_vcf \\
--output=${prefix}_results \\
--template=$sdf \\
--threads=$task.cpus
cat <<-END_VERSIONS > versions.yml
"${task.process}":
rtg-tools: \$(echo \$(rtg version | head -n 1 | awk '{print \$4}'))
END_VERSIONS
"""
}
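A hypothetical invocation matching the declared inputs; the channel and file names are assumptions, and empty lists can stand in for the optional BED files and missing indices (the script only emits an 'rtg index' command when a .tbi is absent):

include { RTGTOOLS_VCFEVAL } from './modules/rtgtools/vcfeval/main'

workflow {
    RTGTOOLS_VCFEVAL(
        ch_query,                  // [ meta, query.vcf.gz, query.vcf.gz.tbi ]
        [ truth_vcf, truth_tbi ],  // truth VCF and (optional) index
        truth_regions,             // --bed-regions BED, or [] to skip
        evaluation_regions,        // --evaluation-regions BED, or [] to skip
        sdf                        // RTG SDF folder for the reference genome
    )
}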

@ -0,0 +1,95 @@
name: "rtgtools_vcfeval"
description: The VCFeval tool of RTG tools. It is used to evaluate called variants for agreement with a baseline variant set
keywords:
- benchmarking
- vcf
- rtg-tools
tools:
- "rtgtools":
description: "RealTimeGenomics Tools -- Utilities for accurate VCF comparison and manipulation"
homepage: "https://www.realtimegenomics.com/products/rtg-tools"
documentation: "https://github.com/RealTimeGenomics/rtg-tools"
tool_dev_url: "https://github.com/RealTimeGenomics/rtg-tools"
doi: ""
licence: "['BSD']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- truth_vcf:
type: file
description: A standard VCF to compare against
pattern: "*.{vcf,vcf.gz}"
- truth_vcf_index:
type: file
description: The index of the standard VCF (optional)
pattern: "*.tbi"
- query_vcf:
type: file
description: A VCF with called variants to benchmark against the standard
pattern: "*.{vcf,vcf.gz}"
- query_vcf_index:
type: file
description: The index of the called VCF (optional)
pattern: "*.tbi"
- truth_regions:
type: file
description: A BED file containing the strict regions where VCFeval should only evaluate the fully overlapping variants (optional)
pattern: "*.bed"
- evaluation_regions:
type: file
description: A BED file containing the regions where VCFeval will evaluate every fully and partially overlapping variant (optional)
pattern: "*.bed"
- sdf:
type: file
description: The SDF (RTG Sequence Data File) folder of the reference genome
pattern: "*"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- logging:
type: file
description: Files containing logging from vcfeval
pattern: "*{done,progress,.log}"
- tp:
type: file
description: A tuple containing the VCF and TBI file for the true positive variants
pattern: "tp.vcf{.gz,.gz.tbi}"
- baseline:
type: file
description: A tuple containing the VCF and TBI file for the baseline true positive variants
pattern: "tp-baseline.vcf{.gz,.gz.tbi}"
- fp:
type: file
description: A tuple containing the VCF and TBI file for the false positive variants
pattern: "fp.vcf{.gz,.gz.tbi}"
- fn:
type: file
description: A tuple containing the VCF and TBI file for the false negative variants
pattern: "fn.vcf{.gz,.gz.tbi}"
- roc:
type: file
description: TSV files containing ROC data for the evaluated variants
pattern: "*.tsv.gz"
- summary:
type: file
description: A TXT file containing the summary of the evaluation
pattern: "summary.txt"
- phasing:
type: file
description: A TXT file containing the data on the phasing
pattern: "phasing.txt"
authors:
- "@nvnieuwk"

@ -45,7 +45,7 @@ process SAMTOOLS_BAM2FQ {
bam2fq \\
$args \\
-@ $task.cpus \\
$inputbam >${prefix}_interleaved.fq.gz
$inputbam | gzip --no-name > ${prefix}_interleaved.fq.gz
cat <<-END_VERSIONS > versions.yml
"${task.process}":

@ -1,5 +1,4 @@
//There is a -L option to only output alignments in interval, might be an option for exons/panel data?
process SAMTOOLS_BAMTOCRAM {
process SAMTOOLS_CONVERT {
tag "$meta.id"
label 'process_medium'
@ -14,8 +13,8 @@ process SAMTOOLS_BAMTOCRAM {
path fai
output:
tuple val(meta), path("*.cram"), path("*.crai"), emit: cram_crai
path "versions.yml" , emit: versions
tuple val(meta), path("*.{cram,bam}"), path("*.{crai,bai}") , emit: alignment_index
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
@ -23,9 +22,17 @@ process SAMTOOLS_BAMTOCRAM {
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def output_extension = input.getExtension() == "bam" ? "cram" : "bam"
"""
samtools view --threads ${task.cpus} --reference ${fasta} -C $args $input > ${prefix}.cram
samtools index -@${task.cpus} ${prefix}.cram
samtools view \\
--threads ${task.cpus} \\
--reference ${fasta} \\
$args \\
$input \\
-o ${prefix}.${output_extension}
samtools index -@${task.cpus} ${prefix}.${output_extension}
cat <<-END_VERSIONS > versions.yml
"${task.process}":

@ -1,5 +1,5 @@
name: samtools_bamtocram
description: filter/convert and then index CRAM file
name: samtools_convert
description: convert and then index CRAM -> BAM or BAM -> CRAM file
keywords:
- view
- index
@ -23,12 +23,12 @@ input:
e.g. [ id:'test', single_end:false ]
- input:
type: file
description: BAM/SAM file
pattern: "*.{bam,sam}"
description: BAM/CRAM file
pattern: "*.{bam,cram}"
- index:
type: file
description: BAM/SAM index file
pattern: "*.{bai,sai}"
description: BAM/CRAM index file
pattern: "*.{bai,crai}"
- fasta:
type: file
description: Reference file to create the CRAM file
@ -39,10 +39,10 @@ output:
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- cram_crai:
- alignment_index:
type: file
description: filtered/converted CRAM file + index
pattern: "*{.cram,.crai}"
description: filtered/converted BAM/CRAM file + index
pattern: "*{.bam/cram,.bai/crai}"
- version:
type: file
description: File containing software version

@ -41,4 +41,16 @@ process SAMTOOLS_VIEW {
samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
END_VERSIONS
"""
stub:
def prefix = task.ext.prefix ?: "${meta.id}"
"""
touch ${prefix}.bam
touch ${prefix}.cram
cat <<-END_VERSIONS > versions.yml
"${task.process}":
samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
END_VERSIONS
"""
}

modules/shasum/main.nf (new file)

@ -0,0 +1,35 @@
process SHASUM {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "conda-forge::coreutils=9.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/ubuntu:20.04' :
'ubuntu:20.04' }"
input:
tuple val(meta), path(file)
output:
tuple val(meta), path("*.sha256"), emit: checksum
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
sha256sum \\
$args \\
${file} \\
> ${file}.sha256
cat <<-END_VERSIONS > versions.yml
"${task.process}":
sha256sum: \$(echo \$(sha256sum --version 2>&1 | head -n 1| sed 's/^.*) //;' ))
END_VERSIONS
"""
}

modules/shasum/meta.yml (new file)

@ -0,0 +1,40 @@
name: "shasum"
description: Print SHA256 (256-bit) checksums.
keywords:
- checksum
- sha256
tools:
- "md5sum":
description: Create an SHA256 (256-bit) checksum.
homepage: "https://www.gnu.org"
documentation: "https://linux.die.net/man/1/shasum"
licence: GPLv3+
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- file:
type: file
description: Any file
pattern: "*.*"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- checksum:
type: file
description: File containing checksum
pattern: "*.sha256"
authors:
- "@matthdsm"

@ -0,0 +1,64 @@
process SHIGATYPER {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::shigatyper=2.0.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/shigatyper%3A2.0.1--pyhdfd78af_0':
'quay.io/biocontainers/shigatyper:2.0.1--pyhdfd78af_0' }"
input:
tuple val(meta), path(reads)
output:
tuple val(meta), path("${prefix}.tsv") , emit: tsv
tuple val(meta), path("${prefix}-hits.tsv"), optional: true, emit: hits
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
prefix = task.ext.prefix ?: "${meta.id}"
if (meta.is_ont) {
"""
shigatyper \\
$args \\
--SE $reads \\
--ont \\
--name $prefix
cat <<-END_VERSIONS > versions.yml
"${task.process}":
shigatyper: \$(echo \$(shigatyper --version 2>&1) | sed 's/^.*ShigaTyper //' )
END_VERSIONS
"""
} else if (meta.single_end) {
"""
shigatyper \\
$args \\
--SE $reads \\
--name $prefix
cat <<-END_VERSIONS > versions.yml
"${task.process}":
shigatyper: \$(echo \$(shigatyper --version 2>&1) | sed 's/^.*ShigaTyper //' )
END_VERSIONS
"""
} else {
"""
shigatyper \\
$args \\
--R1 ${reads[0]} \\
--R2 ${reads[1]} \\
--name $prefix
cat <<-END_VERSIONS > versions.yml
"${task.process}":
shigatyper: \$(echo \$(shigatyper --version 2>&1) | sed 's/^.*ShigaTyper //' )
END_VERSIONS
"""
}
}

@ -0,0 +1,47 @@
name: "shigatyper"
description: Determine Shigella serotype from Illumina or Oxford Nanopore reads
keywords:
- fastq
- shigella
- serotype
tools:
- "shigatyper":
description: "Typing tool for Shigella spp. from WGS Illumina sequencing"
homepage: "https://github.com/CFSAN-Biostatistics/shigatyper"
documentation: "https://github.com/CFSAN-Biostatistics/shigatyper"
tool_dev_url: "https://github.com/CFSAN-Biostatistics/shigatyper"
doi: "10.1128/AEM.00165-19"
licence: "['Public Domain']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false, is_ont:false ]
- reads:
type: file
description: Illumina or Nanopore FASTQ file
pattern: "*.fastq.gz"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- tsv:
type: file
description: A TSV formatted file with ShigaTyper results
pattern: "*.tsv"
- hits:
type: file
description: A TSV formatted file with individual gene hits
pattern: "*-hits.tsv"
authors:
- "@rpetit3"

modules/slimfastq/main.nf (new file)

@ -0,0 +1,52 @@
def VERSION = '2.04'
process SLIMFASTQ {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::slimfastq=2.04" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/slimfastq:2.04--h87f3376_2':
'quay.io/biocontainers/slimfastq:2.04--h87f3376_2' }"
input:
tuple val(meta), path(fastq)
output:
tuple val(meta), path("*.sfq"), emit: sfq
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
if (meta.single_end) {
"""
gzip -d -c '${fastq}' | slimfastq \\
$args \\
-f '${prefix}.sfq'
cat <<-END_VERSIONS > versions.yml
"${task.process}":
slimfastq: ${VERSION}
END_VERSIONS
"""
} else {
"""
gzip -d -c '${fastq[0]}' | slimfastq \\
$args \\
-f '${prefix}_1.sfq'
gzip -d -c '${fastq[1]}' | slimfastq \\
$args \\
-f '${prefix}_2.sfq'
cat <<-END_VERSIONS > versions.yml
"${task.process}":
slimfastq: ${VERSION}
END_VERSIONS
"""
}
}

@ -0,0 +1,41 @@
name: "slimfastq"
description: Fast, efficient, lossless compression of FASTQ files.
keywords:
- FASTQ
- compression
- lossless
tools:
- "slimfastq":
description: "slimfastq efficiently compresses/decompresses FASTQ files"
homepage: "https://github.com/Infinidat/slimfastq"
tool_dev_url: "https://github.com/Infinidat/slimfastq"
licence: "['BSD-3-clause']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- fastq:
type: file
description: Either a single-end FASTQ file or paired-end files.
pattern: "*.{fq.gz,fastq.gz}"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- sfq:
type: file
description: Either one or two sequence files in slimfastq compressed format.
pattern: "*.{sfq}"
authors:
- "@Midnighter"

@ -1,4 +1,4 @@
process SNAPALIGNER_SINGLE {
process SNAPALIGNER_ALIGN {
tag "$meta.id"
label 'process_high'
@ -21,15 +21,16 @@ process SNAPALIGNER_SINGLE {
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def subcmd = meta.single_end ? "single" : "paired"
"""
mkdir -p index
mv $index index/
snap-aligner single \\
snap-aligner ${subcmd} \\
index \\
${reads.join(" ")} \\
-o -bam ${prefix}.bam \\
-o ${prefix}.bam \\
-t ${task.cpus} \\
$args

@ -1,5 +1,5 @@
name: "snapaligner_single"
description: Performs single end fastq alignment to a fasta reference using SNAP
name: "snapaligner_align"
description: Performs fastq alignment to a fasta reference using SNAP
keywords:
- alignment
- map
@ -22,7 +22,7 @@ input:
e.g. [ id:'test', single_end:false ]
- reads:
type: file
description: List of single end input files
description: List of input FASTQ files: 2 for paired-end reads, or 1 for single-end reads or a BAM file
pattern: "*.{fastq.gz,fq.gz,fastq,fq,bam}"
- index:
type: file

@ -1,41 +0,0 @@
process SNAPALIGNER_PAIRED {
tag '$meta.id'
label 'process_high'
conda (params.enable_conda ? "bioconda::snap-aligner=2.0.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/snap-aligner:2.0.1--hd03093a_1':
'quay.io/biocontainers/snap-aligner:2.0.1--hd03093a_1' }"
input:
tuple val(meta), path(reads)
path index
output:
tuple val(meta), path("*.bam"), emit: bam
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
mkdir -p index
mv $index index/
snap-aligner paired \\
index \\
${reads.join(" ")} \\
-o -bam ${prefix}.bam \\
-t ${task.cpus} \\
$args
cat <<-END_VERSIONS > versions.yml
"${task.process}":
snapaligner: \$(snap-aligner 2>&1| head -n 1 | sed 's/^.*version //;s/.\$//')
END_VERSIONS
"""
}

@ -1,48 +0,0 @@
name: "snapaligner_paired"
description: Performs paired end fastq alignment to a fasta reference using SNAP
keywords:
- alignment
- map
- fastq
- bam
- sam
tools:
- "snapaligner":
description: "Scalable Nucleotide Alignment Program -- a fast and accurate read aligner for high-throughput sequencing data"
homepage: "http://snap.cs.berkeley.edu"
documentation: "https://1drv.ms/b/s!AhuEg_0yZD86hcpblUt-muHKYsG8fA?e=R8ogug"
tool_dev_url: "https://github.com/amplab/snap"
doi: "10.1101/2021.11.23.469039"
licence: "['Apache v2']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- reads:
type: file
description: List of input fastq files of size 2 for fastq or 1 for bam
pattern: "*.{fastq.gz,fq.gz,fastq,fq,bam}"
- index:
type: file
description: List of SNAP genome index files
pattern: "{Genome,GenomeIndex,GenomeIndexHash,OverflowTable}"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- bam:
type: file
description: Aligned BAM file
pattern: "*.{bam}"
authors:
- "@matthdsm"

View file

@ -8,15 +8,16 @@ LABEL \
COPY environment.yml /
RUN conda env create -f /environment.yml && conda clean -a
# Add conda installation dir to PATH (instead of doing 'conda activate')
ENV PATH /opt/conda/envs/nf-core-snpeff-5.0/bin:$PATH
# Setup default ARG variables
ARG GENOME=GRCh38
ARG SNPEFF_CACHE_VERSION=99
ARG SNPEFF_TAG=99
# Add conda installation dir to PATH (instead of doing 'conda activate')
ENV PATH /opt/conda/envs/nf-core-snpeff-${SNPEFF_TAG}/bin:$PATH
# Download Genome
RUN snpEff download -v ${GENOME}.${SNPEFF_CACHE_VERSION}
# Dump the details of the installed packages to a file for posterity
RUN conda env export --name nf-core-snpeff-5.0 > nf-core-snpeff-5.0.yml
RUN conda env export --name nf-core-snpeff-${SNPEFF_TAG} > nf-core-snpeff-${SNPEFF_TAG}.yml

5
modules/snpeff/build.sh Executable file → Normal file
View file

@ -9,10 +9,11 @@ build_push() {
SNPEFF_TAG=$3
docker build \
. \
-t nfcore/snpeff:${SNPEFF_TAG}.${GENOME} \
software/snpeff/. \
--build-arg GENOME=${GENOME} \
--build-arg SNPEFF_CACHE_VERSION=${SNPEFF_CACHE_VERSION}
--build-arg SNPEFF_CACHE_VERSION=${SNPEFF_CACHE_VERSION} \
--build-arg SNPEFF_TAG=${SNPEFF_TAG}
docker push nfcore/snpeff:${SNPEFF_TAG}.${GENOME}
}

View file

@ -10,18 +10,6 @@ tools:
homepage: https://pcingola.github.io/SnpEff/
documentation: https://pcingola.github.io/SnpEff/se_introduction/
licence: ["MIT"]
params:
- use_cache:
type: boolean
description: |
boolean to enable the usage of containers with cache
Enable the usage of containers with cache
Does not work with conda
- snpeff_tag:
type: value
description: |
Specify the tag for the container
https://hub.docker.com/r/nfcore/snpeff/tags
input:
- meta:
type: map

View file

@ -1,43 +1,26 @@
process SRATOOLS_PREFETCH {
tag "$id"
label 'process_low'
label 'error_retry'
conda (params.enable_conda ? 'bioconda::sra-tools=2.11.0' : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/sra-tools:2.11.0--pl5262h314213e_0' :
'quay.io/biocontainers/sra-tools:2.11.0--pl5262h314213e_0' }"
'https://depot.galaxyproject.org/singularity/sra-tools:2.11.0--pl5321ha49a11a_3' :
'quay.io/biocontainers/sra-tools:2.11.0--pl5321ha49a11a_3' }"
input:
tuple val(meta), val(id)
output:
tuple val(meta), path("$id"), emit: sra
tuple val(meta), path(id), emit: sra
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def config = "/LIBS/GUID = \"${UUID.randomUUID().toString()}\"\\n/libs/cloud/report_instance_identity = \"true\"\\n"
"""
eval "\$(vdb-config -o n NCBI_SETTINGS | sed 's/[" ]//g')"
if [[ ! -f "\${NCBI_SETTINGS}" ]]; then
mkdir -p "\$(dirname "\${NCBI_SETTINGS}")"
printf '${config}' > "\${NCBI_SETTINGS}"
fi
shell:
args = task.ext.args ?: ''
args2 = task.ext.args2 ?: '5 1 100' // <num retries> <base delay in seconds> <max delay in seconds>
config = "/LIBS/GUID = \"${UUID.randomUUID().toString()}\"\\n/libs/cloud/report_instance_identity = \"true\"\\n"
prefetch \\
$args \\
--progress \\
$id
vdb-validate $id
cat <<-END_VERSIONS > versions.yml
"${task.process}":
sratools: \$(prefetch --version 2>&1 | grep -Eo '[0-9.]+')
END_VERSIONS
"""
template 'retry_with_backoff.sh'
}

View file

@ -0,0 +1,59 @@
#!/usr/bin/env bash
set -u
retry_with_backoff() {
local max_attempts=${1}
local delay=${2}
local max_time=${3}
local attempt=1
local output=
local status=
# Remove the first three arguments to this function in order to access
# the 'real' command with `${@}`.
shift 3
while [ ${attempt} -le ${max_attempts} ]; do
output=$("${@}")
status=${?}
if [ ${status} -eq 0 ]; then
break
fi
if [ ${attempt} -lt ${max_attempts} ]; then
echo "Failed attempt ${attempt} of ${max_attempts}. Retrying in ${delay} s." >&2
sleep ${delay}
elif [ ${attempt} -eq ${max_attempts} ]; then
echo "Failed after ${attempt} attempts." >&2
return ${status}
fi
attempt=$(( ${attempt} + 1 ))
delay=$(( ${delay} * 2 ))
if [ ${delay} -ge ${max_time} ]; then
delay=${max_time}
fi
done
echo "${output}"
}
eval "$(vdb-config -o n NCBI_SETTINGS | sed 's/[" ]//g')"
if [[ ! -f "${NCBI_SETTINGS}" ]]; then
mkdir -p "$(dirname "${NCBI_SETTINGS}")"
printf '!{config}' > "${NCBI_SETTINGS}"
fi
retry_with_backoff !{args2} \
prefetch \
!{args} \
!{id}
vdb-validate !{id}
cat <<-END_VERSIONS > versions.yml
"!{task.process}":
sratools: $(prefetch --version 2>&1 | grep -Eo '[0-9.]+')
END_VERSIONS

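The three retry parameters are injected via !{args2}, defaulting to '5 1 100' in the module above. A hedged sketch of overriding them from a pipeline configuration (the values are illustrative only):

process {
    withName: SRATOOLS_PREFETCH {
        // <num retries> <base delay in seconds> <max delay in seconds>
        ext.args2 = '3 5 300'
    }
}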
View file

@ -0,0 +1,47 @@
process SRST2_SRST2 {
tag "${meta.id}"
label 'process_low'
conda (params.enable_conda ? "bioconda::srst2=0.2.0" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/srst2%3A0.2.0--py27_2':
'quay.io/biocontainers/srst2:0.2.0--py27_2'}"
input:
tuple val(meta), path(fastq_s), path(db)
output:
tuple val(meta), path("*_genes_*_results.txt") , optional:true, emit: gene_results
tuple val(meta), path("*_fullgenes_*_results.txt") , optional:true, emit: fullgene_results
tuple val(meta), path("*_mlst_*_results.txt") , optional:true, emit: mlst_results
tuple val(meta), path("*.pileup") , emit: pileup
tuple val(meta), path("*.sorted.bam") , emit: sorted_bam
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ""
def prefix = task.ext.prefix ?: "${meta.id}"
def read_s = meta.single_end ? "--input_se ${fastq_s}" : "--input_pe ${fastq_s[0]} ${fastq_s[1]}"
if (meta.db=="gene") {
database = "--gene_db ${db}"
} else if (meta.db=="mlst") {
database = "--mlst_db ${db}"
} else {
error "Please set meta.db to either \"gene\" or \"mlst\""
}
"""
srst2 \\
${read_s} \\
--threads $task.cpus \\
--output ${prefix} \\
${database} \\
$args
cat <<-END_VERSIONS > versions.yml
"${task.process}":
srst2: \$(echo \$(srst2 --version 2>&1) | sed 's/srst2 //')
END_VERSIONS
"""
}

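Since the process dispatches on meta.db, each input tuple must carry the database type alongside the reads and the database file. A minimal sketch (file paths are placeholders):

#!/usr/bin/env nextflow
nextflow.enable.dsl = 2

include { SRST2_SRST2 } from './modules/srst2/srst2/main'

workflow test_srst2_gene_db {
    input = [
        [ id:'test', single_end:false, db:'gene' ],           // meta map; db selects --gene_db
        [ file('test_1.fastq.gz'), file('test_2.fastq.gz') ], // paired-end reads
        file('resistance_genes.fasta')                        // database in FASTA format
    ]
    SRST2_SRST2 ( input )
}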
View file

@ -0,0 +1,72 @@
name: srst2_srst2
description: |
Short Read Sequence Typing for Bacterial Pathogens is a program designed to take Illumina sequence data,
an MLST database and/or a database of gene sequences (e.g. resistance genes, virulence genes, etc.)
and report the presence of STs and/or reference genes.
keywords:
- mlst
- typing
- illumina
tools:
- srst2:
description: "Short Read Sequence Typing for Bacterial Pathogens"
homepage: "http://katholt.github.io/srst2/"
documentation: "https://github.com/katholt/srst2/blob/master/README.md"
tool_dev_url: "https://github.com/katholt/srst2"
doi: "10.1186/s13073-014-0090-6"
licence: ["BSD"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
id: should be the identification number or sample name
single_end: should be true for single-end data and false for paired-end data
db: should be either 'gene' to use the --gene_db option or 'mlst' to use the --mlst_db option
e.g. [ id:'sample', single_end:false , db:'gene']
- fastq_s:
type: file
description: |
gzipped fastq file(s). If the files are NOT in
MiSeq format (sample_S1_L001_R1_001.fastq.gz), use the --forward and --reverse parameters; otherwise
the default is _1, i.e. forward reads are expected as sample_1.fastq.gz.
pattern: "*.fastq.gz"
- db:
type: file
description: Database in FASTA format
pattern: "*.fasta"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'sample', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- txt:
type: file
description: A detailed report, with one row per gene per sample, as described at github.com/katholt/srst2#gene-typing
pattern: "*_fullgenes_*_results.txt"
- txt:
type: file
description: A tabulated summary report of samples x genes.
pattern: "*_genes_*_results.txt"
- txt:
type: file
description: A tabulated summary report of mlst subtyping.
pattern: "*_mlst_*_results.txt"
- bam:
type: file
description: Sorted BAM file
pattern: "*.sorted.bam"
- pileup:
type: file
description: SAMtools pileup file
pattern: "*.pileup"
authors:
- "@jvhagey"

View file

@ -2,10 +2,10 @@ process SVDB_MERGE {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::svdb=2.6.0" : null)
conda (params.enable_conda ? "bioconda::svdb=2.6.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/svdb:2.6.0--py39h5371cbf_0':
'quay.io/biocontainers/svdb:2.6.0--py39h5371cbf_0' }"
'https://depot.galaxyproject.org/singularity/svdb:2.6.1--py39h5371cbf_0':
'quay.io/biocontainers/svdb:2.6.1--py39h5371cbf_0' }"
input:
tuple val(meta), path(vcfs)

View file

@ -2,10 +2,10 @@ process SVDB_QUERY {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::svdb=2.6.0" : null)
conda (params.enable_conda ? "bioconda::svdb=2.6.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/svdb:2.6.0--py39h5371cbf_0':
'quay.io/biocontainers/svdb:2.6.0--py39h5371cbf_0' }"
'https://depot.galaxyproject.org/singularity/svdb:2.6.1--py39h5371cbf_0':
'quay.io/biocontainers/svdb:2.6.1--py39h5371cbf_0' }"
input:
tuple val(meta), path(vcf)

View file

@ -0,0 +1,49 @@
def VERSION = '1.8.3' // Pinned tool version, reported in versions.yml below
process VARDICTJAVA {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::vardict-java=1.8.3" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/vardict-java:1.8.3--hdfd78af_0':
'quay.io/biocontainers/vardict-java:1.8.3--hdfd78af_0' }"
input:
tuple val(meta), path(bam), path(bai), path(bed)
tuple path(fasta), path(fasta_fai)
output:
tuple val(meta), path("*.vcf.gz"), emit: vcf
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def args2 = task.ext.args2 ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
vardict-java \\
$args \\
-c 1 -S 2 -E 3 \\
-b $bam \\
-th $task.cpus \\
-N $prefix \\
-G $fasta \\
$bed \\
| teststrandbias.R \\
| var2vcf_valid.pl \\
$args2 \\
-N $prefix \\
| gzip -c > ${prefix}.vcf.gz
cat <<-END_VERSIONS > versions.yml
"${task.process}":
vardict-java: $VERSION
var2vcf_valid.pl: \$(echo \$(var2vcf_valid.pl -h | sed -n 2p | awk '{ print \$2 }'))
END_VERSIONS
"""
}

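Here $args feeds vardict-java itself while $args2 feeds var2vcf_valid.pl. A sketch of tuning both from a config file (the flag values are illustrative, not defaults of this module):

process {
    withName: VARDICTJAVA {
        ext.args  = '-f 0.01'    // vardict-java options, e.g. minimum allele frequency
        ext.args2 = '-f 0.01'    // var2vcf_valid.pl options, kept consistent with the caller
    }
}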
View file

@ -0,0 +1,60 @@
name: "vardictjava"
description: The Java port of the VarDict variant caller
keywords:
- variant calling
- VarDict
- AstraZeneca
tools:
- "vardictjava":
description: "Java port of the VarDict variant discovery program"
homepage: "https://github.com/AstraZeneca-NGS/VarDictJava"
documentation: "https://github.com/AstraZeneca-NGS/VarDictJava"
tool_dev_url: "https://github.com/AstraZeneca-NGS/VarDictJava"
doi: "10.1093/nar/gkw227"
licence: "['MIT']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- bam:
type: file
description: BAM/SAM file
pattern: "*.{bam,sam}"
- bai:
type: file
description: Index of the BAM file
pattern: "*.bai"
- fasta:
type: file
description: FASTA of the reference genome
pattern: "*.{fa,fasta}"
- fasta_fai:
type: file
description: The index of the FASTA of the reference genome
pattern: "*.fai"
- bed:
type: file
description: BED with the regions of interest
pattern: "*.bed"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- vcf:
type: file
description: VCF file output
pattern: "*.vcf.gz"
authors:
- "@nvnieuwk"

View file

@ -0,0 +1,31 @@
//
// Run VEP to annotate VCF files
//
include { ENSEMBLVEP } from '../../../../modules/ensemblvep/main'
include { TABIX_BGZIPTABIX } from '../../../../modules/tabix/bgziptabix/main'
workflow ANNOTATION_ENSEMBLVEP {
take:
vcf // channel: [ val(meta), vcf ]
vep_genome // value: genome to use
vep_species // value: species to use
vep_cache_version // value: cache version to use
vep_cache // path: /path/to/vep/cache (optional)
vep_extra_files // channel: [ file1, file2...] (optional)
main:
ch_versions = Channel.empty()
ENSEMBLVEP(vcf, vep_genome, vep_species, vep_cache_version, vep_cache, vep_extra_files)
TABIX_BGZIPTABIX(ENSEMBLVEP.out.vcf)
// Gather versions of all tools used
ch_versions = ch_versions.mix(ENSEMBLVEP.out.versions.first())
ch_versions = ch_versions.mix(TABIX_BGZIPTABIX.out.versions.first())
emit:
vcf_tbi = TABIX_BGZIPTABIX.out.gz_tbi // channel: [ val(meta), vcf.gz, vcf.gz.tbi ]
reports = ENSEMBLVEP.out.report // path: *.html
versions = ch_versions // path: versions.yml
}

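A minimal sketch of wiring this subworkflow from a pipeline; the genome, species, and cache version values are placeholders:

include { ANNOTATION_ENSEMBLVEP } from './subworkflows/nf-core/annotation/ensemblvep/main'

workflow {
    vcf_ch = Channel.of( [ [ id:'test' ], file('test.vcf.gz') ] )
    ANNOTATION_ENSEMBLVEP ( vcf_ch, 'GRCh38', 'homo_sapiens', '105', [], [] )
    ANNOTATION_ENSEMBLVEP.out.vcf_tbi.view()
}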
View file

@ -0,0 +1,49 @@
name: annotation_ensemblvep
description: |
Perform annotation with ensemblvep and bgzip + tabix index the resulting VCF file
keywords:
- ensemblvep
modules:
- ensemblvep
- tabix/bgziptabix
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- vcf:
type: file
description: |
vcf to annotate
- genome:
type: value
description: |
which genome to annotate with
- species:
type: value
description: |
which species to annotate with
- cache_version:
type: value
description: |
which version of the cache to annotate with
- cache:
type: file
description: |
path to VEP cache (optional)
- extra_files:
type: tuple
description: |
path to file(s) needed for plugins (optional)
output:
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- vcf_tbi:
type: file
description: Compressed vcf file + tabix index
pattern: "[ *{.vcf.gz,vcf.gz.tbi} ]"
authors:
- "@maxulysse"

View file

@ -0,0 +1,28 @@
//
// Run SNPEFF to annotate VCF files
//
include { SNPEFF } from '../../../../modules/snpeff/main'
include { TABIX_BGZIPTABIX } from '../../../../modules/tabix/bgziptabix/main'
workflow ANNOTATION_SNPEFF {
take:
vcf // channel: [ val(meta), vcf ]
snpeff_db // value: db version to use
snpeff_cache // path: /path/to/snpeff/cache (optional)
main:
ch_versions = Channel.empty()
SNPEFF(vcf, snpeff_db, snpeff_cache)
TABIX_BGZIPTABIX(SNPEFF.out.vcf)
// Gather versions of all tools used
ch_versions = ch_versions.mix(SNPEFF.out.versions.first())
ch_versions = ch_versions.mix(TABIX_BGZIPTABIX.out.versions.first())
emit:
vcf_tbi = TABIX_BGZIPTABIX.out.gz_tbi // channel: [ val(meta), vcf.gz, vcf.gz.tbi ]
reports = SNPEFF.out.report // path: *.html
versions = ch_versions // path: versions.yml
}

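The snpEff variant follows the same shape, with a single db value and an optional cache. A minimal sketch (the db value is a placeholder):

include { ANNOTATION_SNPEFF } from './subworkflows/nf-core/annotation/snpeff/main'

workflow {
    vcf_ch = Channel.of( [ [ id:'test' ], file('test.vcf.gz') ] )
    ANNOTATION_SNPEFF ( vcf_ch, 'GRCh38.99', [] )
}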
View file

@ -11,11 +11,19 @@ input:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test' ]
- input:
type: vcf
description: list containing one vcf file
pattern: "[ *.{vcf,vcf.gz} ]"
e.g. [ id:'test', single_end:false ]
- vcf:
type: file
description: |
vcf to annotate
- db:
type: value
description: |
which db to annotate with
- cache:
type: file
description: |
path to snpEff cache (optional)
output:
- versions:
type: file

View file

@ -1,26 +0,0 @@
//
// Run VEP to annotate VCF files
//
include { ENSEMBLVEP } from '../../../modules/ensemblvep/main'
include { TABIX_BGZIPTABIX as ANNOTATION_BGZIPTABIX } from '../../../modules/tabix/bgziptabix/main'
workflow ANNOTATION_ENSEMBLVEP {
take:
vcf // channel: [ val(meta), vcf ]
vep_genome // value: which genome
vep_species // value: which species
vep_cache_version // value: which cache version
vep_cache // path: path_to_vep_cache (optionnal)
main:
ENSEMBLVEP(vcf, vep_genome, vep_species, vep_cache_version, vep_cache)
ANNOTATION_BGZIPTABIX(ENSEMBLVEP.out.vcf)
ch_versions = ENSEMBLVEP.out.versions.first().mix(ANNOTATION_BGZIPTABIX.out.versions.first())
emit:
vcf_tbi = ANNOTATION_BGZIPTABIX.out.gz_tbi // channel: [ val(meta), vcf.gz, vcf.gz.tbi ]
reports = ENSEMBLVEP.out.report // path: *.html
versions = ch_versions // path: versions.yml
}

View file

@ -1,29 +0,0 @@
name: annotation_ensemblvep
description: |
Perform annotation with ensemblvep and bgzip + tabix index the resulting VCF file
keywords:
- ensemblvep
modules:
- ensemblvep
- tabix/bgziptabix
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test' ]
- input:
type: vcf
description: list containing one vcf file
pattern: "[ *.{vcf,vcf.gz} ]"
output:
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- vcf_tbi:
type: file
description: Compressed vcf file + tabix index
pattern: "[ *{.vcf.gz,vcf.gz.tbi} ]"
authors:
- "@maxulysse"

View file

@ -1,23 +0,0 @@
//
// Run SNPEFF to annotate VCF files
//
include { SNPEFF } from '../../../modules/snpeff/main'
include { TABIX_BGZIPTABIX as ANNOTATION_BGZIPTABIX } from '../../../modules/tabix/bgziptabix/main'
workflow ANNOTATION_SNPEFF {
take:
vcf // channel: [ val(meta), vcf ]
snpeff_db // value: version of db to use
snpeff_cache // path: path_to_snpeff_cache (optionnal)
main:
SNPEFF(vcf, snpeff_db, snpeff_cache)
ANNOTATION_BGZIPTABIX(SNPEFF.out.vcf)
ch_versions = SNPEFF.out.versions.first().mix(ANNOTATION_BGZIPTABIX.out.versions.first())
emit:
vcf_tbi = ANNOTATION_BGZIPTABIX.out.gz_tbi // channel: [ val(meta), vcf.gz, vcf.gz.tbi ]
reports = SNPEFF.out.report // path: *.html
versions = ch_versions // path: versions.yml
}

View file

@ -341,6 +341,10 @@ bracken/bracken:
- modules/bracken/bracken/**
- tests/modules/bracken/bracken/**
busco:
- modules/busco/**
- tests/modules/busco/**
bwa/aln:
- modules/bwa/aln/**
- tests/modules/bwa/aln/**
@ -895,6 +899,14 @@ hamronization/summarize:
- modules/hamronization/summarize/**
- tests/modules/hamronization/summarize/**
happy/happy:
- modules/happy/happy/**
- tests/modules/happy/happy/**
happy/prepy:
- modules/happy/prepy/**
- tests/modules/happy/prepy/**
hicap:
- modules/hicap/**
- tests/modules/hicap/**
@ -1062,6 +1074,10 @@ krona/ktimporttext:
- modules/krona/ktimporttext/**
- tests/modules/krona/ktimporttext/**
krona/ktupdatetaxonomy:
- modules/krona/ktupdatetaxonomy/**
- tests/modules/krona/ktupdatetaxonomy/**
last/dotplot:
- modules/last/dotplot/**
- tests/modules/last/dotplot/**
@ -1182,6 +1198,10 @@ maxbin2:
- modules/maxbin2/**
- tests/modules/maxbin2/**
md5sum:
- modules/md5sum/**
- tests/modules/md5sum/**
medaka:
- modules/medaka/**
- tests/modules/medaka/**
@ -1198,6 +1218,18 @@ meningotype:
- modules/meningotype/**
- tests/modules/meningotype/**
meryl/count:
- modules/meryl/count/**
- tests/modules/meryl/count/**
meryl/histogram:
- modules/meryl/histogram/**
- tests/modules/meryl/histogram/**
meryl/unionsum:
- modules/meryl/unionsum/**
- tests/modules/meryl/unionsum/**
metabat2/jgisummarizebamcontigdepths:
- modules/metabat2/jgisummarizebamcontigdepths/**
- tests/modules/metabat2/jgisummarizebamcontigdepths/**
@ -1246,6 +1278,10 @@ mosdepth:
- modules/mosdepth/**
- tests/modules/mosdepth/**
motus/downloaddb:
- modules/motus/downloaddb/**
- tests/modules/motus/downloaddb/**
msisensor/msi:
- modules/msisensor/msi/**
- tests/modules/msisensor/msi/**
@ -1587,6 +1623,10 @@ rseqc/tin:
- modules/rseqc/tin/**
- tests/modules/rseqc/tin/**
rtgtools/vcfeval:
- modules/rtgtools/vcfeval/**
- tests/modules/rtgtools/vcfeval/**
salmon/index:
- modules/salmon/index/**
- tests/modules/salmon/index/**
@ -1607,9 +1647,9 @@ samtools/bam2fq:
- modules/samtools/bam2fq/**
- tests/modules/samtools/bam2fq/**
samtools/bamtocram:
- modules/samtools/bamtocram/**
- tests/modules/samtools/bamtocram/**
samtools/convert:
- modules/samtools/convert/**
- tests/modules/samtools/convert/**
samtools/collatefastq:
- modules/samtools/collatefastq/**
@ -1723,6 +1763,14 @@ seqwish/induce:
- modules/seqwish/induce/**
- tests/modules/seqwish/induce/**
shasum:
- modules/shasum/**
- tests/modules/shasum/**
shigatyper:
- modules/shigatyper/**
- tests/modules/shigatyper/**
shovill:
- modules/shovill/**
- tests/modules/shovill/**
@ -1731,18 +1779,18 @@ sistr:
- modules/sistr/**
- tests/modules/sistr/**
slimfastq:
- modules/slimfastq/**
- tests/modules/slimfastq/**
snapaligner/align:
- modules/snapaligner/align/**
- tests/modules/snapaligner/align/**
snapaligner/index:
- modules/snapaligner/index/**
- tests/modules/snapaligner/index/**
snapaligner/paired:
- modules/snapaligner/paired/**
- tests/modules/snapaligner/paired/**
snapaligner/single:
- modules/snapaligner/single/**
- tests/modules/snapaligner/single/**
snpdists:
- modules/snpdists/**
- tests/modules/snpdists/**
@ -1779,6 +1827,10 @@ sratools/prefetch:
- modules/sratools/prefetch/**
- tests/modules/sratools/prefetch/**
srst2/srst2:
- modules/srst2/srst2/**
- tests/modules/srst2/srst2/**
ssuissero:
- modules/ssuissero/**
- tests/modules/ssuissero/**
@ -1920,6 +1972,10 @@ unzip:
- modules/unzip/**
- tests/modules/unzip/**
vardictjava:
- modules/vardictjava/**
- tests/modules/vardictjava/**
variantbam:
- modules/variantbam/**
- tests/modules/variantbam/**

View file

@ -111,7 +111,9 @@ params {
test_sequencing_summary = "${test_data_dir}/genomics/sarscov2/nanopore/sequencing_summary/test.sequencing_summary.txt"
}
'metagenome' {
classified_reads_assignment = "${test_data_dir}/genomics/sarscov2/metagenome/test_1.kraken2.reads.txt"
kraken_report = "${test_data_dir}/genomics/sarscov2/metagenome/test_1.kraken2.report.txt"
krona_taxonomy = "${test_data_dir}/genomics/sarscov2/metagenome/krona_taxonomy.tab"
}
}
'homo_sapiens' {
@ -123,6 +125,7 @@ params {
genome_gff3 = "${test_data_dir}/genomics/homo_sapiens/genome/genome.gff3"
genome_gtf = "${test_data_dir}/genomics/homo_sapiens/genome/genome.gtf"
genome_interval_list = "${test_data_dir}/genomics/homo_sapiens/genome/genome.interval_list"
genome_multi_interval_bed = "${test_data_dir}/genomics/homo_sapiens/genome/genome.multi_intervals.bed"
genome_sizes = "${test_data_dir}/genomics/homo_sapiens/genome/genome.sizes"
genome_bed = "${test_data_dir}/genomics/homo_sapiens/genome/genome.bed"
genome_header = "${test_data_dir}/genomics/homo_sapiens/genome/genome.header"
@ -132,6 +135,7 @@ params {
transcriptome_fasta = "${test_data_dir}/genomics/homo_sapiens/genome/transcriptome.fasta"
genome2_fasta = "${test_data_dir}/genomics/homo_sapiens/genome/genome2.fasta"
genome_chain_gz = "${test_data_dir}/genomics/homo_sapiens/genome/genome.chain.gz"
genome_21_sdf = "${test_data_dir}/genomics/homo_sapiens/genome/chr21/sequence/genome_sdf.tar.gz"
genome_21_fasta = "${test_data_dir}/genomics/homo_sapiens/genome/chr21/sequence/genome.fasta"
genome_21_fasta_fai = "${test_data_dir}/genomics/homo_sapiens/genome/chr21/sequence/genome.fasta.fai"
genome_21_dict = "${test_data_dir}/genomics/homo_sapiens/genome/chr21/sequence/genome.dict"
@ -179,16 +183,16 @@ params {
vcfanno_toml = "${test_data_dir}/genomics/homo_sapiens/genome/vcf/vcfanno/vcfanno.toml"
}
'pangenome' {
pangenome_fa = "${test_data_dir}/pangenomics/homo_sapiens/pangenome.fa"
pangenome_fa_gz = "${test_data_dir}/pangenomics/homo_sapiens/pangenome.fa.gz"
pangenome_paf = "${test_data_dir}/pangenomics/homo_sapiens/pangenome.paf"
pangenome_paf_gz = "${test_data_dir}/pangenomics/homo_sapiens/pangenome.paf.gz"
pangenome_seqwish_gfa = "${test_data_dir}/pangenomics/homo_sapiens/pangenome.seqwish.gfa"
pangenome_smoothxg_gfa = "${test_data_dir}/pangenomics/homo_sapiens/pangenome.smoothxg.gfa"
pangenome_gfaffix_gfa = "${test_data_dir}/pangenomics/homo_sapiens/pangenome.gfaffix.gfa"
pangenome_fa = "${test_data_dir}/pangenomics/homo_sapiens/pangenome.fa"
pangenome_fa_gz = "${test_data_dir}/pangenomics/homo_sapiens/pangenome.fa.gz"
pangenome_paf = "${test_data_dir}/pangenomics/homo_sapiens/pangenome.paf"
pangenome_paf_gz = "${test_data_dir}/pangenomics/homo_sapiens/pangenome.paf.gz"
pangenome_seqwish_gfa = "${test_data_dir}/pangenomics/homo_sapiens/pangenome.seqwish.gfa"
pangenome_smoothxg_gfa = "${test_data_dir}/pangenomics/homo_sapiens/pangenome.smoothxg.gfa"
pangenome_gfaffix_gfa = "${test_data_dir}/pangenomics/homo_sapiens/pangenome.gfaffix.gfa"
'odgi' {
pangenome_og = "${test_data_dir}/pangenomics/homo_sapiens/odgi/pangenome.og"
pangenome_lay = "${test_data_dir}/pangenomics/homo_sapiens/odgi/pangenome.lay"
pangenome_og = "${test_data_dir}/pangenomics/homo_sapiens/odgi/pangenome.og"
pangenome_lay = "${test_data_dir}/pangenomics/homo_sapiens/odgi/pangenome.lay"
}
}
'illumina' {
@ -263,6 +267,9 @@ params {
test2_haplotc_ann_vcf_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/haplotypecaller_calls/test2_haplotc.ann.vcf.gz"
test2_haplotc_ann_vcf_gz_tbi = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/haplotypecaller_calls/test2_haplotc.ann.vcf.gz.tbi"
test2_haplotc_vcf_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/haplotypecaller_calls/test2_haplotc.vcf.gz"
test2_haplotc_vcf_gz_tbi = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/haplotypecaller_calls/test2_haplotc.vcf.gz.tbi"
test2_recal = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/variantrecalibrator/test2.recal"
test2_recal_idx = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/variantrecalibrator/test2.recal.idx"
test2_tranches = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/variantrecalibrator/test2.tranches"
@ -315,25 +322,25 @@ params {
test_pytor = "${test_data_dir}/genomics/homo_sapiens/illumina/pytor/test.pytor"
}
'pacbio' {
primers = "${test_data_dir}/genomics/homo_sapiens/pacbio/fasta/primers.fasta"
alz = "${test_data_dir}/genomics/homo_sapiens/pacbio/bam/alz.bam"
alzpbi = "${test_data_dir}/genomics/homo_sapiens/pacbio/bam/alz.bam.pbi"
ccs = "${test_data_dir}/genomics/homo_sapiens/pacbio/bam/alz.ccs.bam"
ccs_fa = "${test_data_dir}/genomics/homo_sapiens/pacbio/fasta/alz.ccs.fasta"
ccs_fa_gz = "${test_data_dir}/genomics/homo_sapiens/pacbio/fasta/alz.ccs.fasta.gz"
ccs_fq = "${test_data_dir}/genomics/homo_sapiens/pacbio/fastq/alz.ccs.fastq"
ccs_fq_gz = "${test_data_dir}/genomics/homo_sapiens/pacbio/fastq/alz.ccs.fastq.gz"
ccs_xml = "${test_data_dir}/genomics/homo_sapiens/pacbio/xml/alz.ccs.consensusreadset.xml"
hifi = "${test_data_dir}/genomics/homo_sapiens/pacbio/fastq/test_hifi.fastq.gz"
lima = "${test_data_dir}/genomics/homo_sapiens/pacbio/bam/alz.ccs.fl.NEB_5p--NEB_Clontech_3p.bam"
refine = "${test_data_dir}/genomics/homo_sapiens/pacbio/bam/alz.ccs.fl.NEB_5p--NEB_Clontech_3p.flnc.bam"
cluster = "${test_data_dir}/genomics/homo_sapiens/pacbio/bam/alz.ccs.fl.NEB_5p--NEB_Clontech_3p.flnc.clustered.bam"
singletons = "${test_data_dir}/genomics/homo_sapiens/pacbio/bam/alz.ccs.fl.NEB_5p--NEB_Clontech_3p.flnc.clustered.singletons.bam"
aligned = "${test_data_dir}/genomics/homo_sapiens/pacbio/bam/alz.ccs.fl.NEB_5p--NEB_Clontech_3p.flnc.clustered.singletons.merged.aligned.bam"
alignedbai = "${test_data_dir}/genomics/homo_sapiens/pacbio/bam/alz.ccs.fl.NEB_5p--NEB_Clontech_3p.flnc.clustered.singletons.merged.aligned.bam.bai"
genemodel1 = "${test_data_dir}/genomics/homo_sapiens/pacbio/bed/alz.ccs.fl.NEB_5p--NEB_Clontech_3p.flnc.clustered.singletons.merged.aligned_tc.bed"
genemodel2 = "${test_data_dir}/genomics/homo_sapiens/pacbio/bed/alz.ccs.fl.NEB_5p--NEB_Clontech_3p.flnc.clustered.singletons.merged.aligned_tc.2.bed"
filelist = "${test_data_dir}/genomics/homo_sapiens/pacbio/txt/filelist.txt"
primers = "${test_data_dir}/genomics/homo_sapiens/pacbio/fasta/primers.fasta"
alz = "${test_data_dir}/genomics/homo_sapiens/pacbio/bam/alz.bam"
alzpbi = "${test_data_dir}/genomics/homo_sapiens/pacbio/bam/alz.bam.pbi"
ccs = "${test_data_dir}/genomics/homo_sapiens/pacbio/bam/alz.ccs.bam"
ccs_fa = "${test_data_dir}/genomics/homo_sapiens/pacbio/fasta/alz.ccs.fasta"
ccs_fa_gz = "${test_data_dir}/genomics/homo_sapiens/pacbio/fasta/alz.ccs.fasta.gz"
ccs_fq = "${test_data_dir}/genomics/homo_sapiens/pacbio/fastq/alz.ccs.fastq"
ccs_fq_gz = "${test_data_dir}/genomics/homo_sapiens/pacbio/fastq/alz.ccs.fastq.gz"
ccs_xml = "${test_data_dir}/genomics/homo_sapiens/pacbio/xml/alz.ccs.consensusreadset.xml"
hifi = "${test_data_dir}/genomics/homo_sapiens/pacbio/fastq/test_hifi.fastq.gz"
lima = "${test_data_dir}/genomics/homo_sapiens/pacbio/bam/alz.ccs.fl.NEB_5p--NEB_Clontech_3p.bam"
refine = "${test_data_dir}/genomics/homo_sapiens/pacbio/bam/alz.ccs.fl.NEB_5p--NEB_Clontech_3p.flnc.bam"
cluster = "${test_data_dir}/genomics/homo_sapiens/pacbio/bam/alz.ccs.fl.NEB_5p--NEB_Clontech_3p.flnc.clustered.bam"
singletons = "${test_data_dir}/genomics/homo_sapiens/pacbio/bam/alz.ccs.fl.NEB_5p--NEB_Clontech_3p.flnc.clustered.singletons.bam"
aligned = "${test_data_dir}/genomics/homo_sapiens/pacbio/bam/alz.ccs.fl.NEB_5p--NEB_Clontech_3p.flnc.clustered.singletons.merged.aligned.bam"
alignedbai = "${test_data_dir}/genomics/homo_sapiens/pacbio/bam/alz.ccs.fl.NEB_5p--NEB_Clontech_3p.flnc.clustered.singletons.merged.aligned.bam.bai"
genemodel1 = "${test_data_dir}/genomics/homo_sapiens/pacbio/bed/alz.ccs.fl.NEB_5p--NEB_Clontech_3p.flnc.clustered.singletons.merged.aligned_tc.bed"
genemodel2 = "${test_data_dir}/genomics/homo_sapiens/pacbio/bed/alz.ccs.fl.NEB_5p--NEB_Clontech_3p.flnc.clustered.singletons.merged.aligned_tc.2.bed"
filelist = "${test_data_dir}/genomics/homo_sapiens/pacbio/txt/filelist.txt"
}
}
'bacteroides_fragilis' {
@ -342,6 +349,7 @@ params {
genome_gbff_gz = "${test_data_dir}/genomics/prokaryotes/bacteroides_fragilis/genome/genome.gbff.gz"
genome_paf = "${test_data_dir}/genomics/prokaryotes/bacteroides_fragilis/genome/genome.paf"
genome_mapping_potential_arg = "${test_data_dir}/genomics/prokaryotes/bacteroides_fragilis/genome/genome.mapping.potential.ARG"
genome_gff_gz = "${test_data_dir}/genomics/prokaryotes/bacteroides_fragilis/genome/genome.gff.gz"
}
'illumina' {
@ -391,31 +399,31 @@ params {
}
'generic' {
'csv' {
test_csv = "${test_data_dir}/generic/csv/test.csv"
test_csv = "${test_data_dir}/generic/csv/test.csv"
}
'notebooks' {
rmarkdown = "${test_data_dir}/generic/notebooks/rmarkdown/rmarkdown_notebook.Rmd"
ipython_md = "${test_data_dir}/generic/notebooks/jupyter/ipython_notebook.md"
ipython_ipynb = "${test_data_dir}/generic/notebooks/jupyter/ipython_notebook.ipynb"
rmarkdown = "${test_data_dir}/generic/notebooks/rmarkdown/rmarkdown_notebook.Rmd"
ipython_md = "${test_data_dir}/generic/notebooks/jupyter/ipython_notebook.md"
ipython_ipynb = "${test_data_dir}/generic/notebooks/jupyter/ipython_notebook.ipynb"
}
'tsv' {
test_tsv = "${test_data_dir}/generic/tsv/test.tsv"
test_tsv = "${test_data_dir}/generic/tsv/test.tsv"
}
'txt' {
hello = "${test_data_dir}/generic/txt/hello.txt"
hello = "${test_data_dir}/generic/txt/hello.txt"
}
'cnn' {
reference = "${test_data_dir}/generic/cnn/reference.cnn"
reference = "${test_data_dir}/generic/cnn/reference.cnn"
}
'cooler'{
test_pairix_pair_gz = "${test_data_dir}/genomics/homo_sapiens/cooler/cload/hg19/hg19.GM12878-MboI.pairs.subsample.blksrt.txt.gz"
test_pairix_pair_gz_px2 = "${test_data_dir}/genomics/homo_sapiens/cooler/cload/hg19/hg19.GM12878-MboI.pairs.subsample.blksrt.txt.gz.px2"
test_pairs_pair = "${test_data_dir}/genomics/homo_sapiens/cooler/cload/hg19/hg19.sample1.pairs"
test_tabix_pair_gz = "${test_data_dir}/genomics/homo_sapiens/cooler/cload/hg19/hg19.GM12878-MboI.pairs.subsample.sorted.possrt.txt.gz"
test_tabix_pair_gz_tbi = "${test_data_dir}/genomics/homo_sapiens/cooler/cload/hg19/hg19.GM12878-MboI.pairs.subsample.sorted.possrt.txt.gz.tbi"
hg19_chrom_sizes = "${test_data_dir}/genomics/homo_sapiens/cooler/cload/hg19/hg19.chrom.sizes"
test_merge_cool = "${test_data_dir}/genomics/homo_sapiens/cooler/merge/toy/toy.symm.upper.2.cool"
test_merge_cool_cp2 = "${test_data_dir}/genomics/homo_sapiens/cooler/merge/toy/toy.symm.upper.2.cp2.cool"
test_pairix_pair_gz = "${test_data_dir}/genomics/homo_sapiens/cooler/cload/hg19/hg19.GM12878-MboI.pairs.subsample.blksrt.txt.gz"
test_pairix_pair_gz_px2 = "${test_data_dir}/genomics/homo_sapiens/cooler/cload/hg19/hg19.GM12878-MboI.pairs.subsample.blksrt.txt.gz.px2"
test_pairs_pair = "${test_data_dir}/genomics/homo_sapiens/cooler/cload/hg19/hg19.sample1.pairs"
test_tabix_pair_gz = "${test_data_dir}/genomics/homo_sapiens/cooler/cload/hg19/hg19.GM12878-MboI.pairs.subsample.sorted.possrt.txt.gz"
test_tabix_pair_gz_tbi = "${test_data_dir}/genomics/homo_sapiens/cooler/cload/hg19/hg19.GM12878-MboI.pairs.subsample.sorted.possrt.txt.gz.tbi"
hg19_chrom_sizes = "${test_data_dir}/genomics/homo_sapiens/cooler/cload/hg19/hg19.chrom.sizes"
test_merge_cool = "${test_data_dir}/genomics/homo_sapiens/cooler/merge/toy/toy.symm.upper.2.cool"
test_merge_cool_cp2 = "${test_data_dir}/genomics/homo_sapiens/cooler/merge/toy/toy.symm.upper.2.cp2.cool"
}
}

View file

@ -20,7 +20,7 @@ workflow test_arriba_single_end {
STAR_GENOMEGENERATE ( fasta, gtf )
STAR_ALIGN ( input, STAR_GENOMEGENERATE.out.index, gtf, star_ignore_sjdbgtf, seq_platform, seq_center )
ARRIBA ( STAR_ALIGN.out.bam, fasta, gtf )
ARRIBA ( STAR_ALIGN.out.bam, fasta, gtf, [], [], [], [], [])
}
workflow test_arriba_paired_end {
@ -38,5 +38,5 @@ workflow test_arriba_paired_end {
STAR_GENOMEGENERATE ( fasta, gtf )
STAR_ALIGN ( input, STAR_GENOMEGENERATE.out.index, gtf, star_ignore_sjdbgtf, seq_platform, seq_center )
ARRIBA ( STAR_ALIGN.out.bam, fasta, gtf )
ARRIBA ( STAR_ALIGN.out.bam, fasta, gtf, [], [], [], [], [])
}

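The five trailing empty lists keep the new optional inputs (blacklist, known_fusions, structural_variants, tags, protein_domains) unset. Supplying one of them is positional; a hedged sketch with a placeholder blacklist file:

ARRIBA ( STAR_ALIGN.out.bam, fasta, gtf, file('blacklist.tsv.gz'), [], [], [], [] )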
View file

@ -4,7 +4,7 @@
- arriba
files:
- path: output/arriba/test.fusions.discarded.tsv
md5sum: cad8c215b938d1e45b747a5b7898a4c2
md5sum: 7602ab4ccbbb0c54fbca12a942877e6d
- path: output/arriba/test.fusions.tsv
md5sum: 7c3383f7eb6d79b84b0bd30a7ef02d70
- path: output/star/star/Genome
@ -39,6 +39,7 @@
- path: output/star/star/transcriptInfo.tab
md5sum: 0c3a5adb49d15e5feff81db8e29f2e36
- path: output/star/test.Aligned.out.bam
md5sum: 4fa079d11f8938e51015e3e477fa7149
- path: output/star/test.Log.final.out
- path: output/star/test.Log.out
- path: output/star/test.Log.progress.out
@ -50,7 +51,7 @@
- arriba
files:
- path: output/arriba/test.fusions.discarded.tsv
md5sum: 85e36c887464e4deaa65f45174d3b8fd
md5sum: cdc6cfbc75e68ce29a766f50f390274d
- path: output/arriba/test.fusions.tsv
md5sum: 7c3383f7eb6d79b84b0bd30a7ef02d70
- path: output/star/star/Genome

View file

@ -1,5 +1,16 @@
params {
force_large_index = false
}
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}
if (params.force_large_index) {
process {
withName: BOWTIE2_BUILD {
ext.args = '--large-index'
}
}
}

View file

@ -39,3 +39,45 @@
md5sum: 52be6950579598a990570fbcf5372184
- path: ./output/bowtie2/bowtie2/genome.rev.2.bt2
md5sum: e3b4ef343dea4dd571642010a7d09597
- name: bowtie2 align single-end large-index
command: nextflow run ./tests/modules/bowtie2/align -entry test_bowtie2_align_single_end -c ./tests/config/nextflow.config -c ./tests/modules/bowtie2/align/nextflow.config --force_large_index
tags:
- bowtie2
- bowtie2/align
files:
- path: ./output/bowtie2/test.bam
- path: ./output/bowtie2/test.bowtie2.log
- path: ./output/bowtie2/bowtie2/genome.3.bt2l
md5sum: 8952b3e0b1ce9a7a5916f2e147180853
- path: ./output/bowtie2/bowtie2/genome.2.bt2l
md5sum: 22c284084784a0720989595e0c9461fd
- path: ./output/bowtie2/bowtie2/genome.1.bt2l
md5sum: 07d811cd4e350d56267183d2ac7023a5
- path: ./output/bowtie2/bowtie2/genome.4.bt2l
md5sum: c25be5f8b0378abf7a58c8a880b87626
- path: ./output/bowtie2/bowtie2/genome.rev.1.bt2l
md5sum: fda48e35925fb24d1c0785f021981e25
- path: ./output/bowtie2/bowtie2/genome.rev.2.bt2l
md5sum: 802c26d32b970e1b105032b7ce7348b4
- name: bowtie2 align paired-end large-index
command: nextflow run ./tests/modules/bowtie2/align -entry test_bowtie2_align_paired_end -c ./tests/config/nextflow.config -c ./tests/modules/bowtie2/align/nextflow.config --force_large_index
tags:
- bowtie2
- bowtie2/align
files:
- path: ./output/bowtie2/test.bam
- path: ./output/bowtie2/test.bowtie2.log
- path: ./output/bowtie2/bowtie2/genome.3.bt2l
md5sum: 8952b3e0b1ce9a7a5916f2e147180853
- path: ./output/bowtie2/bowtie2/genome.2.bt2l
md5sum: 22c284084784a0720989595e0c9461fd
- path: ./output/bowtie2/bowtie2/genome.1.bt2l
md5sum: 07d811cd4e350d56267183d2ac7023a5
- path: ./output/bowtie2/bowtie2/genome.4.bt2l
md5sum: c25be5f8b0378abf7a58c8a880b87626
- path: ./output/bowtie2/bowtie2/genome.rev.1.bt2l
md5sum: fda48e35925fb24d1c0785f021981e25
- path: ./output/bowtie2/bowtie2/genome.rev.2.bt2l
md5sum: 802c26d32b970e1b105032b7ce7348b4

404
tests/modules/busco/main.nf Normal file
View file

@ -0,0 +1,404 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { BUSCO } from '../../../modules/busco/main.nf'
workflow test_busco_genome_single_fasta {
input = [
[ id:'test' ], // meta map
file( params.test_data['bacteroides_fragilis']['genome']['genome_fna_gz'], checkIfExists: true)
]
BUSCO (
input,
['bacteria_odb10', 'bacteroidetes_odb10'], // Specified lineages; launch with 'auto' to use --auto-lineage ('auto' removed from this test due to memory issues)
[], // Download busco lineage
[], // No config
)
/* Output tree:
/tmp/tmpyz_hi62i/busco/
├── short_summary.specific.bacteria_odb10.genome.fna.json -> /tmp/tmpza_0dth3/33/7d8c9b2c8931d9ad6a67aa843895e7/short_summary.specific.bacteria_odb10.genome.fna.json
├── short_summary.specific.bacteria_odb10.genome.fna.txt -> /tmp/tmpza_0dth3/33/7d8c9b2c8931d9ad6a67aa843895e7/short_summary.specific.bacteria_odb10.genome.fna.txt
├── short_summary.specific.bacteroidetes_odb10.genome.fna.json -> /tmp/tmpza_0dth3/6a/e95a0cd21785ce33d63b8f73a68a51/short_summary.specific.bacteroidetes_odb10.genome.fna.json
├── short_summary.specific.bacteroidetes_odb10.genome.fna.txt -> /tmp/tmpza_0dth3/6a/e95a0cd21785ce33d63b8f73a68a51/short_summary.specific.bacteroidetes_odb10.genome.fna.txt
├── test-bacteria_odb10-busco -> /tmp/tmpza_0dth3/33/7d8c9b2c8931d9ad6a67aa843895e7/test-bacteria_odb10-busco/
│ ├── genome.fna/
│ │ ├── logs/
│ │ │ ├── hmmsearch_err.log
│ │ │ ├── hmmsearch_out.log
│ │ │ ├── prodigal_err.log
│ │ │ └── prodigal_out.log
│ │ ├── prodigal_output/
│ │ │ └── predicted_genes/
│ │ └── run_bacteria_odb10/
│ │ ├── busco_sequences/
│ │ ├── full_table.tsv
│ │ ├── hmmer_output/
│ │ ├── missing_busco_list.tsv
│ │ ├── short_summary.json
│ │ └── short_summary.txt
│ └── logs/
│ └── busco.log
├── test-bacteria_odb10-busco.batch_summary.txt -> /tmp/tmpza_0dth3/33/7d8c9b2c8931d9ad6a67aa843895e7/test-bacteria_odb10-busco.batch_summary.txt
├── test-bacteroidetes_odb10-busco -> /tmp/tmpza_0dth3/6a/e95a0cd21785ce33d63b8f73a68a51/test-bacteroidetes_odb10-busco/
│ ├── genome.fna/
│ │ ├── logs/
│ │ │ ├── hmmsearch_err.log
│ │ │ ├── hmmsearch_out.log
│ │ │ ├── prodigal_err.log
│ │ │ └── prodigal_out.log
│ │ ├── prodigal_output/
│ │ │ └── predicted_genes/
│ │ └── run_bacteroidetes_odb10/
│ │ ├── busco_sequences/
│ │ ├── full_table.tsv
│ │ ├── hmmer_output/
│ │ ├── missing_busco_list.tsv
│ │ ├── short_summary.json
│ │ └── short_summary.txt
│ └── logs/
│ └── busco.log
├── test-bacteroidetes_odb10-busco.batch_summary.txt -> /tmp/tmpza_0dth3/6a/e95a0cd21785ce33d63b8f73a68a51/test-bacteroidetes_odb10-busco.batch_summary.txt
└── versions.yml -> /tmp/tmpza_0dth3/6a/e95a0cd21785ce33d63b8f73a68a51/versions.yml
Former Output tree -w 'auto':
/tmp/tmp846crjv2/busco/
├── short_summary.generic.bacteria_odb10.genome.fna.json -> /tmp/tmpi6af66j1/18/8be22ecd7a71471ff5082bd512972b/short_summary.generic.bacteria_odb10.genome.fna.json
├── short_summary.generic.bacteria_odb10.genome.fna.txt -> /tmp/tmpi6af66j1/18/8be22ecd7a71471ff5082bd512972b/short_summary.generic.bacteria_odb10.genome.fna.txt
├── short_summary.specific.bacteria_odb10.genome.fna.json -> /tmp/tmpi6af66j1/45/107812e983a8e695c380ebc215e7d9/short_summary.specific.bacteria_odb10.genome.fna.json
├── short_summary.specific.bacteria_odb10.genome.fna.txt -> /tmp/tmpi6af66j1/45/107812e983a8e695c380ebc215e7d9/short_summary.specific.bacteria_odb10.genome.fna.txt
├── short_summary.specific.bacteroidales_odb10.genome.fna.json -> /tmp/tmpi6af66j1/18/8be22ecd7a71471ff5082bd512972b/short_summary.specific.bacteroidales_odb10.genome.fna.json
├── short_summary.specific.bacteroidales_odb10.genome.fna.txt -> /tmp/tmpi6af66j1/18/8be22ecd7a71471ff5082bd512972b/short_summary.specific.bacteroidales_odb10.genome.fna.txt
├── short_summary.specific.bacteroidetes_odb10.genome.fna.json -> /tmp/tmpi6af66j1/a2/eb4a34894f3ac5554759ad6c9f652b/short_summary.specific.bacteroidetes_odb10.genome.fna.json
├── short_summary.specific.bacteroidetes_odb10.genome.fna.txt -> /tmp/tmpi6af66j1/a2/eb4a34894f3ac5554759ad6c9f652b/short_summary.specific.bacteroidetes_odb10.genome.fna.txt
├── test-auto-busco -> /tmp/tmpi6af66j1/18/8be22ecd7a71471ff5082bd512972b/test-auto-busco/
│ ├── genome.fna/
│ │ ├── auto_lineage/
│ │ │ ├── run_archaea_odb10/
│ │ │ ├── run_bacteria_odb10/
│ │ │ └── run_eukaryota_odb10/
│ │ ├── logs/
│ │ │ ├── hmmsearch_err.log
│ │ │ ├── hmmsearch_out.log
│ │ │ ├── metaeuk_err.log
│ │ │ ├── metaeuk_out.log
│ │ │ ├── prodigal_err.log
│ │ │ ├── prodigal_out.log
│ │ │ ├── sepp_err.log
│ │ │ └── sepp_out.log
│ │ ├── prodigal_output/
│ │ │ └── predicted_genes/
│ │ ├── run_bacteria_odb10 -> /tmp/tmpi6af66j1/18/8be22ecd7a71471ff5082bd512972b/test-auto-busco/genome.fna/auto_lineage/run_bacteria_odb10/ [recursive, not followed]
│ │ └── run_bacteroidales_odb10/
│ │ ├── busco_sequences/
│ │ ├── full_table.tsv
│ │ ├── hmmer_output/
│ │ ├── missing_busco_list.tsv
│ │ ├── short_summary.json
│ │ └── short_summary.txt
│ └── logs/
│ └── busco.log
├── test-auto-busco.batch_summary.txt -> /tmp/tmpi6af66j1/18/8be22ecd7a71471ff5082bd512972b/test-auto-busco.batch_summary.txt
├── test-bacteria_odb10-busco -> /tmp/tmpi6af66j1/45/107812e983a8e695c380ebc215e7d9/test-bacteria_odb10-busco/
│ ├── genome.fna/
│ │ ├── logs/
│ │ │ ├── hmmsearch_err.log
│ │ │ ├── hmmsearch_out.log
│ │ │ ├── prodigal_err.log
│ │ │ └── prodigal_out.log
│ │ ├── prodigal_output/
│ │ │ └── predicted_genes/
│ │ └── run_bacteria_odb10/
│ │ ├── busco_sequences/
│ │ ├── full_table.tsv
│ │ ├── hmmer_output/
│ │ ├── missing_busco_list.tsv
│ │ ├── short_summary.json
│ │ └── short_summary.txt
│ └── logs/
│ └── busco.log
├── test-bacteria_odb10-busco.batch_summary.txt -> /tmp/tmpi6af66j1/45/107812e983a8e695c380ebc215e7d9/test-bacteria_odb10-busco.batch_summary.txt
├── test-bacteroidetes_odb10-busco -> /tmp/tmpi6af66j1/a2/eb4a34894f3ac5554759ad6c9f652b/test-bacteroidetes_odb10-busco/
│ ├── genome.fna/
│ │ ├── logs/
│ │ │ ├── hmmsearch_err.log
│ │ │ ├── hmmsearch_out.log
│ │ │ ├── prodigal_err.log
│ │ │ └── prodigal_out.log
│ │ ├── prodigal_output/
│ │ │ └── predicted_genes/
│ │ └── run_bacteroidetes_odb10/
│ │ ├── busco_sequences/
│ │ ├── full_table.tsv
│ │ ├── hmmer_output/
│ │ ├── missing_busco_list.tsv
│ │ ├── short_summary.json
│ │ └── short_summary.txt
│ └── logs/
│ └── busco.log
├── test-bacteroidetes_odb10-busco.batch_summary.txt -> /tmp/tmpi6af66j1/a2/eb4a34894f3ac5554759ad6c9f652b/test-bacteroidetes_odb10-busco.batch_summary.txt
└── versions.yml -> /tmp/tmpi6af66j1/18/8be22ecd7a71471ff5082bd512972b/versions.yml
*/
}
workflow test_busco_genome_multi_fasta {
input = [
[ id:'test' ], // meta map
[
file( params.test_data['bacteroides_fragilis']['genome']['genome_fna_gz'], checkIfExists: true),
file( params.test_data['candidatus_portiera_aleyrodidarum']['genome']['genome_fasta'], checkIfExists: true)
]
]
BUSCO (
input,
'bacteria_odb10',
[], // Download busco lineage
[], // No config
)
/* Output tree:
/tmp/tmpk19byek7/busco/
├── short_summary.specific.bacteria_odb10.genome.fasta.json -> /tmp/tmplt9fv3tl/15/ff310a16d9ce7ad24e207a05ce718e/short_summary.specific.bacteria_odb10.genome.fasta.json
├── short_summary.specific.bacteria_odb10.genome.fasta.txt -> /tmp/tmplt9fv3tl/15/ff310a16d9ce7ad24e207a05ce718e/short_summary.specific.bacteria_odb10.genome.fasta.txt
├── short_summary.specific.bacteria_odb10.genome.fna.json -> /tmp/tmplt9fv3tl/15/ff310a16d9ce7ad24e207a05ce718e/short_summary.specific.bacteria_odb10.genome.fna.json
├── short_summary.specific.bacteria_odb10.genome.fna.txt -> /tmp/tmplt9fv3tl/15/ff310a16d9ce7ad24e207a05ce718e/short_summary.specific.bacteria_odb10.genome.fna.txt
├── test-bacteria_odb10-busco -> /tmp/tmplt9fv3tl/15/ff310a16d9ce7ad24e207a05ce718e/test-bacteria_odb10-busco/
│ ├── genome.fasta/
│ │ ├── logs/
│ │ │ ├── hmmsearch_err.log
│ │ │ ├── hmmsearch_out.log
│ │ │ ├── prodigal_err.log
│ │ │ └── prodigal_out.log
│ │ ├── prodigal_output/
│ │ │ └── predicted_genes/
│ │ └── run_bacteria_odb10/
│ │ ├── busco_sequences/
│ │ ├── full_table.tsv
│ │ ├── hmmer_output/
│ │ ├── missing_busco_list.tsv
│ │ ├── short_summary.json
│ │ └── short_summary.txt
│ ├── genome.fna/
│ │ ├── logs/
│ │ │ ├── hmmsearch_err.log
│ │ │ ├── hmmsearch_out.log
│ │ │ ├── prodigal_err.log
│ │ │ └── prodigal_out.log
│ │ ├── prodigal_output/
│ │ │ └── predicted_genes/
│ │ └── run_bacteria_odb10/
│ │ ├── busco_sequences/
│ │ ├── full_table.tsv
│ │ ├── hmmer_output/
│ │ ├── missing_busco_list.tsv
│ │ ├── short_summary.json
│ │ └── short_summary.txt
│ └── logs/
│ └── busco.log
├── test-bacteria_odb10-busco.batch_summary.txt -> /tmp/tmplt9fv3tl/15/ff310a16d9ce7ad24e207a05ce718e/test-bacteria_odb10-busco.batch_summary.txt
└── versions.yml -> /tmp/tmplt9fv3tl/15/ff310a16d9ce7ad24e207a05ce718e/versions.yml
*/
}
workflow test_busco_eukaryote_metaeuk {
input = [
[ id:'test' ], // meta map
file( params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true)
]
BUSCO (
input,
'eukaryota_odb10',
[], // Download busco lineage
[], // No config
)
/* Output tree:
/tmp/tmpeq4dsir5/busco/
├── short_summary.specific.eukaryota_odb10.genome.fasta.json -> /tmp/tmp60hby2pk/6f/529873d91cda6bae3a4a6a21746aee/short_summary.specific.eukaryota_odb10.genome.fasta.json
├── short_summary.specific.eukaryota_odb10.genome.fasta.txt -> /tmp/tmp60hby2pk/6f/529873d91cda6bae3a4a6a21746aee/short_summary.specific.eukaryota_odb10.genome.fasta.txt
├── test-eukaryota_odb10-busco -> /tmp/tmp60hby2pk/6f/529873d91cda6bae3a4a6a21746aee/test-eukaryota_odb10-busco/
│ ├── genome.fasta/
│ │ ├── logs/
│ │ │ ├── hmmsearch_err.log
│ │ │ ├── hmmsearch_out.log
│ │ │ ├── metaeuk_err.log
│ │ │ └── metaeuk_out.log
│ │ └── run_eukaryota_odb10/
│ │ ├── busco_sequences/
│ │ ├── full_table.tsv
│ │ ├── hmmer_output/
│ │ ├── metaeuk_output/
│ │ ├── missing_busco_list.tsv
│ │ ├── short_summary.json
│ │ └── short_summary.txt
│ └── logs/
│ └── busco.log
├── test-eukaryota_odb10-busco.batch_summary.txt -> /tmp/tmp60hby2pk/6f/529873d91cda6bae3a4a6a21746aee/test-eukaryota_odb10-busco.batch_summary.txt
└── versions.yml -> /tmp/tmp60hby2pk/6f/529873d91cda6bae3a4a6a21746aee/versions.yml
*/
}
workflow test_busco_eukaryote_augustus {
input = [
[ id:'test' ], // meta map
file( params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true)
]
BUSCO (
input,
'eukaryota_odb10',
[], // Download busco lineage
[], // No config
)
/* Output tree:
/tmp/tmp2xqaygjj/busco/
├── test-eukaryota_odb10-busco -> /tmp/tmpjqs61x9o/3f/67cc14e873c0ceb45e2a27594d624c/test-eukaryota_odb10-busco/
│ ├── genome.fasta/
│ │ ├── blast_db/
│ │ │ ├── genome.fasta.ndb
│ │ │ ├── genome.fasta.nhr
│ │ │ ├── genome.fasta.nin
│ │ │ ├── genome.fasta.not
│ │ │ ├── genome.fasta.nsq
│ │ │ ├── genome.fasta.ntf
│ │ │ └── genome.fasta.nto
│ │ ├── logs/
│ │ │ ├── makeblastdb_err.log
│ │ │ ├── makeblastdb_out.log
│ │ │ ├── tblastn_err.log
│ │ │ └── tblastn_out.log
│ │ └── run_eukaryota_odb10/
│ │ ├── augustus_output/
│ │ ├── blast_output/
│ │ ├── busco_sequences/
│ │ └── hmmer_output/
│ └── logs/
│ └── busco.log
├── test-eukaryota_odb10-busco.batch_summary.txt -> /tmp/tmpjqs61x9o/3f/67cc14e873c0ceb45e2a27594d624c/test-eukaryota_odb10-busco.batch_summary.txt
└── versions.yml -> /tmp/tmpjqs61x9o/3f/67cc14e873c0ceb45e2a27594d624c/versions.yml
*/
}
workflow test_busco_protein {
input = [
[ id:'test' ], // meta map
file( params.test_data['candidatus_portiera_aleyrodidarum']['genome']['proteome_fasta'], checkIfExists: true)
]
BUSCO (
input,
'bacteria_odb10',
[], // Download busco lineage
[], // No config
)
/* Output tree:
/tmp/tmpzwd5dn56/busco/
├── short_summary.specific.bacteria_odb10.proteome.fasta.json -> /tmp/tmpk1nlgbf_/ae/0db07b5cd08fb23d0aba5f134ebbe2/short_summary.specific.bacteria_odb10.proteome.fasta.json
├── short_summary.specific.bacteria_odb10.proteome.fasta.txt -> /tmp/tmpk1nlgbf_/ae/0db07b5cd08fb23d0aba5f134ebbe2/short_summary.specific.bacteria_odb10.proteome.fasta.txt
├── test-bacteria_odb10-busco -> /tmp/tmpk1nlgbf_/ae/0db07b5cd08fb23d0aba5f134ebbe2/test-bacteria_odb10-busco/
│ ├── logs/
│ │ └── busco.log
│ └── proteome.fasta/
│ ├── logs/
│ │ ├── hmmsearch_err.log
│ │ └── hmmsearch_out.log
│ └── run_bacteria_odb10/
│ ├── busco_sequences/
│ ├── full_table.tsv
│ ├── hmmer_output/
│ ├── missing_busco_list.tsv
│ ├── short_summary.json
│ └── short_summary.txt
├── test-bacteria_odb10-busco.batch_summary.txt -> /tmp/tmpk1nlgbf_/ae/0db07b5cd08fb23d0aba5f134ebbe2/test-bacteria_odb10-busco.batch_summary.txt
└── versions.yml -> /tmp/tmpk1nlgbf_/ae/0db07b5cd08fb23d0aba5f134ebbe2/versions.yml
*/
}
workflow test_busco_transcriptome {
input = [
[ id:'test' ], // meta map
file( params.test_data['bacteroides_fragilis']['illumina']['test1_contigs_fa_gz'], checkIfExists: true)
]
BUSCO (
input,
'bacteria_odb10',
[], // Download busco lineage
[], // No config
)
/* Output tree:
/tmp/tmpitjyvo9g/busco/
├── short_summary.specific.bacteria_odb10.test1.contigs.fa.json -> /tmp/tmp6wqi0eyx/4f/ed0b23f0fc807bb68091298845c135/short_summary.specific.bacteria_odb10.test1.contigs.fa.json
├── short_summary.specific.bacteria_odb10.test1.contigs.fa.txt -> /tmp/tmp6wqi0eyx/4f/ed0b23f0fc807bb68091298845c135/short_summary.specific.bacteria_odb10.test1.contigs.fa.txt
├── test-bacteria_odb10-busco -> /tmp/tmp6wqi0eyx/4f/ed0b23f0fc807bb68091298845c135/test-bacteria_odb10-busco/
│ ├── logs/
│ │ └── busco.log
│ └── test1.contigs.fa/
│ ├── blast_db/
│ │ ├── test1.contigs.fa.ndb
│ │ ├── test1.contigs.fa.nhr
│ │ ├── test1.contigs.fa.nin
│ │ ├── test1.contigs.fa.not
│ │ ├── test1.contigs.fa.nsq
│ │ ├── test1.contigs.fa.ntf
│ │ └── test1.contigs.fa.nto
│ ├── logs/
│ │ ├── hmmsearch_err.log
│ │ ├── hmmsearch_out.log
│ │ ├── makeblastdb_err.log
│ │ ├── makeblastdb_out.log
│ │ ├── tblastn_err.log
│ │ └── tblastn_out.log
│ ├── run_bacteria_odb10/
│ │ ├── blast_output/
│ │ ├── busco_sequences/
│ │ ├── full_table.tsv
│ │ ├── hmmer_output/
│ │ ├── missing_busco_list.tsv
│ │ ├── short_summary.json
│ │ ├── short_summary.txt
│ │ └── single_copy_proteins.faa
│ └── translated_proteins/
│ ├── 1024388at2.faa
│ ├── 1054741at2.faa
│ ├── 1093223at2.faa
│ ├── 1151822at2.faa
│ ├── 143460at2.faa
│ ├── 1491686at2.faa
│ ├── 1504821at2.faa
│ ├── 1574817at2.faa
│ ├── 1592033at2.faa
│ ├── 1623045at2.faa
│ ├── 1661836at2.faa
│ ├── 1674344at2.faa
│ ├── 1698718at2.faa
│ ├── 1990650at2.faa
│ ├── 223233at2.faa
│ ├── 402899at2.faa
│ ├── 505485at2.faa
│ ├── 665824at2.faa
│ ├── 776861at2.faa
│ ├── 874197at2.faa
│ ├── 932854at2.faa
│ └── 95696at2.faa
├── test-bacteria_odb10-busco.batch_summary.txt -> /tmp/tmp6wqi0eyx/4f/ed0b23f0fc807bb68091298845c135/test-bacteria_odb10-busco.batch_summary.txt
└── versions.yml -> /tmp/tmp6wqi0eyx/4f/ed0b23f0fc807bb68091298845c135/versions.yml
*/
}

View file

@ -0,0 +1,28 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
withName: 'test_busco_genome_single_fasta:BUSCO' {
ext.args = '--mode genome'
}
withName: 'test_busco_genome_multi_fasta:BUSCO' {
ext.args = '--mode genome'
}
withName: 'test_busco_eukaryote_metaeuk:BUSCO' {
ext.args = '--mode genome'
}
withName: 'test_busco_eukaryote_augustus:BUSCO' {
ext.args = '--mode genome --augustus'
}
withName: 'test_busco_protein:BUSCO' {
ext.args = '--mode proteins'
}
withName: 'test_busco_transcriptome:BUSCO'{
ext.args = '--mode transcriptome'
}
}

View file

@ -0,0 +1,159 @@
- name: busco test_busco_genome_single_fasta
command: nextflow run tests/modules/busco -entry test_busco_genome_single_fasta -c tests/config/nextflow.config
tags:
- busco
files:
- path: output/busco/short_summary.specific.bacteria_odb10.genome.fna.json
contains:
- "one_line_summary"
- "input_file"
- "mode"
- "dataset"
- path: output/busco/short_summary.specific.bacteria_odb10.genome.fna.txt
contains:
- "BUSCO version"
- "The lineage dataset is"
- "BUSCO was run in mode"
- "Complete BUSCOs"
- "Missing BUSCOs"
- "Dependencies and versions"
- path: output/busco/short_summary.specific.bacteroidetes_odb10.genome.fna.json
contains:
- "one_line_summary"
- "input_file"
- "mode"
- "dataset"
- path: output/busco/short_summary.specific.bacteroidetes_odb10.genome.fna.txt
contains:
- "BUSCO version"
- "The lineage dataset is"
- "BUSCO was run in mode"
- "Complete BUSCOs"
- "Missing BUSCOs"
- "Dependencies and versions"
- path: output/busco/test-bacteria_odb10-busco.batch_summary.txt
md5sum: e50690742e9ae6abdd2bf99334ff9e12
- path: output/busco/test-bacteroidetes_odb10-busco.batch_summary.txt
md5sum: 4c1b2c4317c88398eddc30877ed740d9
- path: output/busco/versions.yml
md5sum: 8aa830f71587d859df35c6cfab59f35d
- name: busco test_busco_genome_multi_fasta
command: nextflow run tests/modules/busco -entry test_busco_genome_multi_fasta -c tests/config/nextflow.config
tags:
- busco
files:
- path: output/busco/short_summary.specific.bacteria_odb10.genome.fasta.json
contains:
- "one_line_summary"
- "input_file"
- "mode"
- "dataset"
- path: output/busco/short_summary.specific.bacteria_odb10.genome.fasta.txt
contains:
- "BUSCO version"
- "The lineage dataset is"
- "BUSCO was run in mode"
- "Complete BUSCOs"
- "Missing BUSCOs"
- "Dependencies and versions"
- path: output/busco/short_summary.specific.bacteria_odb10.genome.fna.json
contains:
- "one_line_summary"
- "input_file"
- "mode"
- "dataset"
- path: output/busco/short_summary.specific.bacteria_odb10.genome.fna.txt
contains:
- "BUSCO version"
- "The lineage dataset is"
- "BUSCO was run in mode"
- "Complete BUSCOs"
- "Missing BUSCOs"
- "Dependencies and versions"
- path: output/busco/test-bacteria_odb10-busco.batch_summary.txt
md5sum: 5360dfe83bec1f5741ee115e53e6b517
- path: output/busco/versions.yml
md5sum: 9a959eb0a1f765777dff1ea2f5c139c0
- name: busco test_busco_eukaryote_metaeuk
command: nextflow run tests/modules/busco -entry test_busco_eukaryote_metaeuk -c tests/config/nextflow.config
tags:
- busco
files:
- path: output/busco/short_summary.specific.eukaryota_odb10.genome.fasta.json
contains:
- "one_line_summary"
- "input_file"
- "mode"
- "dataset"
- path: output/busco/short_summary.specific.eukaryota_odb10.genome.fasta.txt
contains:
- "BUSCO version"
- "The lineage dataset is"
- "BUSCO was run in mode"
- "Complete BUSCOs"
- "Missing BUSCOs"
- "Dependencies and versions"
- path: output/busco/test-eukaryota_odb10-busco.batch_summary.txt
md5sum: a70806f99ba5706d7353d3353b3f1d2b
- path: output/busco/versions.yml
md5sum: 34a808c257e6db1b0456f3b4372bc477
- name: busco test_busco_eukaryote_augustus
command: nextflow run tests/modules/busco -entry test_busco_eukaryote_augustus -c tests/config/nextflow.config
tags:
- busco
files:
- path: output/busco/test-eukaryota_odb10-busco.batch_summary.txt
md5sum: 660393dd43cd6a093b952d4b8ad41e40
- path: output/busco/versions.yml
md5sum: 2caac915461410b16a1524ac064cd0df
- name: busco test_busco_protein
command: nextflow run tests/modules/busco -entry test_busco_protein -c tests/config/nextflow.config
tags:
- busco
files:
- path: output/busco/short_summary.specific.bacteria_odb10.proteome.fasta.json
contains:
- "one_line_summary"
- "input_file"
- "mode"
- "dataset"
- path: output/busco/short_summary.specific.bacteria_odb10.proteome.fasta.txt
contains:
- "BUSCO version"
- "The lineage dataset is"
- "BUSCO was run in mode"
- "Complete BUSCOs"
- "Missing BUSCOs"
- "Dependencies and versions"
- path: output/busco/test-bacteria_odb10-busco.batch_summary.txt
md5sum: fd3b4e30ce74d1fcb95d6286d6e2049f
- path: output/busco/versions.yml
md5sum: d7392261a57960a7e6aea609dce824f5
- name: busco test_busco_transcriptome
command: nextflow run tests/modules/busco -entry test_busco_transcriptome -c tests/config/nextflow.config
tags:
- busco
files:
- path: output/busco/short_summary.specific.bacteria_odb10.test1.contigs.fa.json
contains:
- "one_line_summary"
- "input_file"
- "mode"
- "dataset"
- path: output/busco/short_summary.specific.bacteria_odb10.test1.contigs.fa.txt
contains:
- "BUSCO version"
- "The lineage dataset is"
- "BUSCO was run in mode"
- "Complete BUSCOs"
- "Missing BUSCOs"
- "Dependencies and versions"
- path: output/busco/test-bacteria_odb10-busco.batch_summary.txt
md5sum: 9a176cafe66ac0adca89dc34ad2be13f
- path: output/busco/versions.yml
md5sum: 30eacbc7df70f6b1e72e0a7b6d02a7e1

View file

@ -4,10 +4,8 @@
- cnvpytor
- cnvpytor/callcnvs
files:
- path: output/cnvpytor/test.tsv
md5sum: d41d8cd98f00b204e9800998ecf8427e
- path: output/cnvpytor/test.pytor
- path: output/cnvpytor/versions.yml
md5sum: 0bea08a253fcb2ff0ff79b99df77b9fa
- name: cnvpytor callcnvs test_cnvpytor_callcnvs stub
command: nextflow run tests/modules/cnvpytor/callcnvs -entry test_cnvpytor_callcnvs -c tests/config/nextflow.config -stub-run
@ -15,6 +13,5 @@
- cnvpytor
- cnvpytor/callcnvs
files:
- path: output/cnvpytor/test.tsv
- path: output/cnvpytor/test.pytor
- path: output/cnvpytor/versions.yml
md5sum: 0bea08a253fcb2ff0ff79b99df77b9fa

View file

@ -5,9 +5,7 @@
- cnvpytor/histogram
files:
- path: output/cnvpytor/test.pytor
md5sum: aa03a8fa15b39f77816705a48e10312a
- path: output/cnvpytor/versions.yml
md5sum: 0f4d75c4f3a3eb26c22616d12b0b78b2
- name: cnvpytor histogram test_cnvpytor_histogram stub
command: nextflow run tests/modules/cnvpytor/histogram -entry test_cnvpytor_histogram -c tests/config/nextflow.config -stub-run
@ -17,4 +15,3 @@
files:
- path: output/cnvpytor/test.pytor
- path: output/cnvpytor/versions.yml
md5sum: 0f4d75c4f3a3eb26c22616d12b0b78b2

View file

@ -6,7 +6,6 @@
files:
- path: output/cnvpytor/test.pytor
- path: output/cnvpytor/versions.yml
md5sum: 5834495324c08a37f3fd73ccdd881dc8
- name: cnvpytor importreaddepth test_cnvpytor_importreaddepth stub
command: nextflow run tests/modules/cnvpytor/importreaddepth -entry test_cnvpytor_importreaddepth -c tests/config/nextflow.config -stub-run
@ -16,7 +15,6 @@
files:
- path: output/cnvpytor/test.pytor
- path: output/cnvpytor/versions.yml
md5sum: 5834495324c08a37f3fd73ccdd881dc8
- name: cnvpytor importreaddepth test_cnvpytor_importreaddepth_cram
command: nextflow run tests/modules/cnvpytor/importreaddepth -entry test_cnvpytor_importreaddepth_cram -c tests/config/nextflow.config
@ -26,7 +24,6 @@
files:
- path: output/cnvpytor/test.pytor
- path: output/cnvpytor/versions.yml
md5sum: dfa0afb0982d985b96d1633f71ebb82a
- name: cnvpytor importreaddepth test_cnvpytor_importreaddepth_cram stub
command: nextflow run tests/modules/cnvpytor/importreaddepth -entry test_cnvpytor_importreaddepth_cram -c tests/config/nextflow.config -stub-run
@ -36,4 +33,3 @@
files:
- path: output/cnvpytor/test.pytor
- path: output/cnvpytor/versions.yml
md5sum: dfa0afb0982d985b96d1633f71ebb82a

View file

@ -5,9 +5,7 @@
- cnvpytor/partition
files:
- path: output/cnvpytor/test.pytor
md5sum: aa03a8fa15b39f77816705a48e10312a
- path: output/cnvpytor/versions.yml
md5sum: 7fd6ec952a316463bcd324f176b46b64
- name: cnvpytor partition test_cnvpytor_partition stub
command: nextflow run tests/modules/cnvpytor/partition -entry test_cnvpytor_partition -c tests/config/nextflow.config -stub-run
@ -17,4 +15,3 @@
files:
- path: output/cnvpytor/test.pytor
- path: output/cnvpytor/versions.yml
md5sum: 7fd6ec952a316463bcd324f176b46b64

View file

@ -0,0 +1,42 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { CNVPYTOR_VIEW } from '../../../../modules/cnvpytor/view/main.nf'
workflow test_cnvpytor_view {
input = [
[ id:'test'], // meta map
[file(params.test_data['homo_sapiens']['illumina']['test_pytor'], checkIfExists: true)]
]
bin_sizes = "10000 100000"
CNVPYTOR_VIEW ( input, bin_sizes, [] )
}
workflow test_cnvpytor_view_tsvout {
input = [
[ id:'test'], // meta map
[file(params.test_data['homo_sapiens']['illumina']['test_pytor'], checkIfExists: true)]
]
output_suffix = "tsv"
CNVPYTOR_VIEW ( input, [], output_suffix )
}
workflow test_cnvpytor_view_stub {
input = [
[ id:'test'], // meta map
[file(params.test_data['homo_sapiens']['illumina']['test_pytor'], checkIfExists: true)]
]
bin_sizes = []
output_suffix = []
CNVPYTOR_VIEW ( input, bin_sizes, output_suffix )
}
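
For reference, a minimal sketch of calling this module from a user pipeline, mirroring the three-argument signature exercised by the test workflows above (the include path and input file are hypothetical):

include { CNVPYTOR_VIEW } from './modules/cnvpytor/view/main.nf' // path assumed

workflow {
    // meta map plus .pytor file, matching the input shape used in the tests
    pytor_ch = Channel.of( [ [ id:'sample1' ], [ file('sample1.pytor') ] ] )
    CNVPYTOR_VIEW ( pytor_ch, '10000 100000', [] ) // VCF output at two bin sizes, as in test_cnvpytor_view
}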

View file

@ -0,0 +1,7 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
withName: CNVPYTOR_VIEW {
ext.args = '10000 100000'
}
}
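
The publishDir closure above derives each module's output directory from the fully qualified process name: it keeps the last colon-separated component, takes the part before the first underscore, and lowercases it. A minimal Groovy sketch of the same expression (the process name is an assumed example of what task.process resolves to):

def name = 'NFCORE_TESTS:TEST_CNVPYTOR_VIEW:CNVPYTOR_VIEW' // assumed value of task.process
def last = name.tokenize(':')[-1]              // 'CNVPYTOR_VIEW'
def dir  = last.tokenize('_')[0].toLowerCase() // 'cnvpytor'
assert dir == 'cnvpytor'                       // published under ${params.outdir}/cnvpytor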

View file

@ -0,0 +1,27 @@
- name: cnvpytor view test_cnvpytor_view
command: nextflow run tests/modules/cnvpytor/view -entry test_cnvpytor_view -c tests/config/nextflow.config
tags:
- cnvpytor
- cnvpytor/view
files:
- path: output/cnvpytor/test_10000.vcf
- path: output/cnvpytor/test_100000.vcf
- path: output/cnvpytor/versions.yml
- name: cnvpytor view test_cnvpytor_view tsv
command: nextflow run tests/modules/cnvpytor/view -entry test_cnvpytor_view_tsvout -c tests/config/nextflow.config
tags:
- cnvpytor
- cnvpytor/view
files:
- path: output/cnvpytor/test_1000.tsv
- path: output/cnvpytor/versions.yml
- name: cnvpytor view test_cnvpytor_view stub
command: nextflow run tests/modules/cnvpytor/view -entry test_cnvpytor_view_stub -c tests/config/nextflow.config -stub-run
tags:
- cnvpytor
- cnvpytor/view
files:
- path: output/cnvpytor/test.vcf
- path: output/cnvpytor/versions.yml

View file

@ -10,5 +10,5 @@ workflow test_ensemblvep {
file(params.test_data['sarscov2']['illumina']['test_vcf'], checkIfExists: true)
]
ENSEMBLVEP ( input, "WBcel235", "caenorhabditis_elegans", "104", [] )
ENSEMBLVEP ( input, "WBcel235", "caenorhabditis_elegans", "104", [], [] )
}

View file

@ -14,3 +14,14 @@ workflow test_gatk4_mergebamalignment {
GATK4_MERGEBAMALIGNMENT ( input, fasta, dict )
}
workflow test_gatk4_mergebamalignment_stubs {
input = [ [ id:'test' ], // meta map
file(params.test_data['sarscov2']['illumina']['test_single_end_bam'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_unaligned_bam'], checkIfExists: true)
]
fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
dict = file(params.test_data['sarscov2']['genome']['genome_dict'], checkIfExists: true)
GATK4_MERGEBAMALIGNMENT ( input, fasta, dict )
}
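
These *_stubs entry workflows exist so a module can be exercised with Nextflow's -stub-run flag, which executes a process's stub: block instead of its script: block and therefore produces placeholder outputs only. A minimal sketch of the mechanism (a hypothetical process, not the actual GATK4 module):

process EXAMPLE_TOOL { // hypothetical process for illustration
    input:
    tuple val(meta), path(bam)

    output:
    tuple val(meta), path('*.bam'), emit: bam

    script:
    """
    some_tool $bam > ${meta.id}.bam   # stand-in for the real command
    """

    stub:
    """
    touch ${meta.id}.bam              # empty placeholder, created under -stub-run
    """
}

Because only placeholders are produced, the matching stub test entries below list output paths without md5sums.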

View file

@ -7,3 +7,12 @@
- path: output/gatk4/test.bam
md5sum: e6f1b343700b7ccb94e81ae127433988
- path: output/gatk4/versions.yml
- name: gatk4 mergebamalignment test_gatk4_mergebamalignment_stubs
command: nextflow run ./tests/modules/gatk4/mergebamalignment -entry test_gatk4_mergebamalignment_stubs -c ./tests/config/nextflow.config -c ./tests/modules/gatk4/mergebamalignment/nextflow.config -stub-run
tags:
- gatk4
- gatk4/mergebamalignment
files:
- path: output/gatk4/test.bam
- path: output/gatk4/versions.yml

View file

@ -118,3 +118,25 @@ workflow test_gatk4_mutect2_mitochondria {
GATK4_MUTECT2_MITO ( input, fasta, fai, dict, [], [], [], [] )
}
workflow test_gatk4_mutect2_tumor_normal_pair_f1r2_stubs {
input = [ [ id:'test', normal_id:'normal', tumor_id:'tumour' ], // meta map
[ file(params.test_data['homo_sapiens']['illumina']['test_paired_end_recalibrated_sorted_bam'], checkIfExists: true),
file(params.test_data['homo_sapiens']['illumina']['test2_paired_end_recalibrated_sorted_bam'], checkIfExists: true)
],
[ file(params.test_data['homo_sapiens']['illumina']['test_paired_end_recalibrated_sorted_bam_bai'], checkIfExists: true),
file(params.test_data['homo_sapiens']['illumina']['test2_paired_end_recalibrated_sorted_bam_bai'], checkIfExists: true)
],
[]
]
fasta = file(params.test_data['homo_sapiens']['genome']['genome_21_fasta'], checkIfExists: true)
fai = file(params.test_data['homo_sapiens']['genome']['genome_21_fasta_fai'], checkIfExists: true)
dict = file(params.test_data['homo_sapiens']['genome']['genome_21_dict'], checkIfExists: true)
germline_resource = file(params.test_data['homo_sapiens']['genome']['gnomad_r2_1_1_21_vcf_gz'], checkIfExists: true)
germline_resource_tbi = file(params.test_data['homo_sapiens']['genome']['gnomad_r2_1_1_21_vcf_gz_tbi'], checkIfExists: true)
panel_of_normals = file(params.test_data['homo_sapiens']['genome']['mills_and_1000g_indels_21_vcf_gz'], checkIfExists: true)
panel_of_normals_tbi = file(params.test_data['homo_sapiens']['genome']['mills_and_1000g_indels_21_vcf_gz_tbi'], checkIfExists: true)
GATK4_MUTECT2_F1R2 ( input, fasta, fai, dict, germline_resource, germline_resource_tbi, panel_of_normals, panel_of_normals_tbi )
}

View file

@ -69,3 +69,15 @@
md5sum: fc6ea14ca2da346babe78161beea28c9
- path: output/gatk4/test.vcf.gz.tbi
- path: output/gatk4/versions.yml
- name: gatk4 mutect2 test_gatk4_mutect2_tumor_normal_pair_f1r2_stubs
command: nextflow run ./tests/modules/gatk4/mutect2 -entry test_gatk4_mutect2_tumor_normal_pair_f1r2_stubs -c ./tests/config/nextflow.config -c ./tests/modules/gatk4/mutect2/nextflow.config -stub-run
tags:
- gatk4
- gatk4/mutect2
files:
- path: output/gatk4/test.f1r2.tar.gz
- path: output/gatk4/test.vcf.gz
- path: output/gatk4/test.vcf.gz.stats
- path: output/gatk4/test.vcf.gz.tbi
- path: output/gatk4/versions.yml

View file

@ -11,3 +11,11 @@ workflow test_gatk4_revertsam {
GATK4_REVERTSAM ( input )
}
workflow test_gatk4_revertsam_stubs {
input = [ [ id:'test' ], // meta map
file(params.test_data['sarscov2']['illumina']['test_paired_end_bam'], checkIfExists: true)
]
GATK4_REVERTSAM ( input )
}

View file

@ -7,3 +7,12 @@
- path: output/gatk4/test.reverted.bam
md5sum: f783a88deb45c3a2c20ca12cbe1c5652
- path: output/gatk4/versions.yml
- name: gatk4 revertsam test_gatk4_revertsam_stubs
command: nextflow run ./tests/modules/gatk4/revertsam -entry test_gatk4_revertsam_stubs -c ./tests/config/nextflow.config -c ./tests/modules/gatk4/revertsam/nextflow.config -stub-run
tags:
- gatk4
- gatk4/revertsam
files:
- path: output/gatk4/test.reverted.bam
- path: output/gatk4/versions.yml

Some files were not shown because too many files have changed in this diff.