Merge branch 'master' of https://github.com/nf-core/modules into feat/update_cnvpytor

Ramprasad Neethiraj 2022-05-03 14:55:42 +02:00
commit be085c0173
49 changed files with 1051 additions and 51 deletions


@ -27,9 +27,7 @@ process ANTISMASH_ANTISMASHLITEDOWNLOADDATABASES {
output:
path("antismash_db") , emit: database
path("css"), emit: css_dir
path("detection"), emit: detection_dir
path("modules"), emit: modules_dir
path("antismash_dir"), emit: antismash_dir
path "versions.yml", emit: versions
when:
@ -37,11 +35,19 @@ process ANTISMASH_ANTISMASHLITEDOWNLOADDATABASES {
script:
def args = task.ext.args ?: ''
conda = params.enable_conda
"""
download-antismash-databases \\
--database-dir antismash_db \\
$args
if [[ $conda = false ]]; \
then \
cp -r /usr/local/lib/python3.8/site-packages/antismash antismash_dir; \
else \
cp -r \$(python -c 'import antismash;print(antismash.__file__.split("/__")[0])') antismash_dir; \
fi
cat <<-END_VERSIONS > versions.yml
"${task.process}":
antismash-lite: \$(antismash --version | sed 's/antiSMASH //')
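Rendered at run time, the two branches of the new conda check in the script block above resolve to one of the following; this is an illustrative sketch, with the paths taken directly from the module itself:

download-antismash-databases --database-dir antismash_db

# params.enable_conda = false (Docker/Singularity image): copy the packaged installation
cp -r /usr/local/lib/python3.8/site-packages/antismash antismash_dir

# params.enable_conda = true: locate the conda-installed package via Python instead
cp -r "$(python -c 'import antismash; print(antismash.__file__.split("/__")[0])')" antismash_dir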


@ -50,21 +50,11 @@ output:
type: directory
description: Download directory for antiSMASH databases
pattern: "antismash_db"
- css_dir:
- antismash_dir:
type: directory
description: |
antismash/outputs/html/css folder which is being created during the antiSMASH database downloading step. These files are normally downloaded by download-antismash-databases itself, and must be retrieved by the user by manually running the command with conda or a standalone installation of antiSMASH. Therefore we do not recommend using this module for production pipelines, but rather require users to specify their own local copy of the antiSMASH database in pipelines.
pattern: "css"
- detection_dir:
type: directory
description: |
antismash/detection folder which is being created during the antiSMASH database downloading step. These files are normally downloaded by download-antismash-databases itself, and must be retrieved by the user by manually running the command with conda or a standalone installation of antiSMASH. Therefore we do not recommend using this module for production pipelines, but rather require users to specify their own local copy of the antiSMASH database in pipelines.
pattern: "detection"
- modules_dir:
type: directory
description: |
antismash/modules folder which is being created during the antiSMASH database downloading step. These files are normally downloaded by download-antismash-databases itself, and must be retrieved by the user by manually running the command with conda or a standalone installation of antiSMASH. Therefore we do not recommend using this module for production pipelines, but rather require users to specify their own local copy of the antiSMASH database in pipelines.
pattern: "modules"
antismash installation folder, which is modified during the antiSMASH database download step. The modified files are normally downloaded by download-antismash-databases itself, and must otherwise be retrieved by the user by running the command manually with a conda or standalone installation of antiSMASH. We therefore do not recommend using this module in production pipelines; instead, users should specify their own local copy of the antiSMASH database and installation folder.
pattern: "antismash_dir"
authors:
- "@jasmezz"


@ -10,11 +10,18 @@ process DIAMOND_BLASTP {
input:
tuple val(meta), path(fasta)
path db
val outext
val out_ext
val blast_columns
output:
tuple val(meta), path('*.{blast,xml,txt,daa,sam,tsv,paf}'), emit: output
path "versions.yml" , emit: versions
tuple val(meta), path('*.blast'), optional: true, emit: blast
tuple val(meta), path('*.xml') , optional: true, emit: xml
tuple val(meta), path('*.txt') , optional: true, emit: txt
tuple val(meta), path('*.daa') , optional: true, emit: daa
tuple val(meta), path('*.sam') , optional: true, emit: sam
tuple val(meta), path('*.tsv') , optional: true, emit: tsv
tuple val(meta), path('*.paf') , optional: true, emit: paf
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
@ -22,7 +29,8 @@ process DIAMOND_BLASTP {
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
switch ( outext ) {
def columns = blast_columns ? "${blast_columns}" : ''
switch ( out_ext ) {
case "blast": outfmt = 0; break
case "xml": outfmt = 5; break
case "txt": outfmt = 6; break
@ -30,6 +38,11 @@ process DIAMOND_BLASTP {
case "sam": outfmt = 101; break
case "tsv": outfmt = 102; break
case "paf": outfmt = 103; break
default:
log.warn("Unknown output file format provided (${out_ext}): selecting DIAMOND default of tabular BLAST output (txt)");
outfmt = '6';
out_ext = 'txt';
break
}
"""
DB=`find -L ./ -name "*.dmnd" | sed 's/.dmnd//'`
@ -39,9 +52,9 @@ process DIAMOND_BLASTP {
--threads $task.cpus \\
--db \$DB \\
--query $fasta \\
--outfmt ${outfmt} \\
--outfmt ${outfmt} ${columns} \\
$args \\
--out ${prefix}.${outext}
--out ${prefix}.${out_ext}
cat <<-END_VERSIONS > versions.yml
"${task.process}":


@ -28,7 +28,7 @@ input:
type: directory
description: Directory containing the protein blast database
pattern: "*"
- outext:
- out_ext:
type: string
description: |
Specify the type of output file to be generated. `blast` corresponds to
@ -36,12 +36,42 @@ input:
`txt` corresponds to BLAST tabular format. `tsv` corresponds to
taxonomic classification format.
pattern: "blast|xml|txt|daa|sam|tsv|paf"
- blast_columns:
type: string
description: |
Optional space-separated list of DIAMOND tabular BLAST output keywords
used in conjunction with the 'txt' out_ext option (--outfmt 6). See the
DIAMOND documentation for more information.
output:
- txt:
- blast:
type: file
description: File containing blastp hits
pattern: "*.{blastp.txt}"
pattern: "*.{blast}"
- xml:
type: file
description: File containing blastp hits
pattern: "*.{xml}"
- txt:
type: file
description: File containing hits in tabular BLAST format.
pattern: "*.{txt}"
- daa:
type: file
description: File containing hits in DAA format
pattern: "*.{daa}"
- sam:
type: file
description: File containing aligned reads in SAM format
pattern: "*.{sam}"
- tsv:
type: file
description: Tab separated file containing taxonomic classification of hits
pattern: "*.{tsv}"
- paf:
type: file
description: File containing aligned reads in pairwise mapping format (PAF)
pattern: "*.{paf}"
- versions:
type: file
description: File containing software versions
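For reference, the blast_columns keywords are DIAMOND's tabular (--outfmt 6) field names; a minimal sketch with a few commonly used fields follows (see the DIAMOND documentation for the full list; file names are placeholders):

diamond blastp --db ./proteome --query proteins.fasta \
    --outfmt 6 qseqid sseqid pident length evalue bitscore \
    --out hits.txt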


@ -10,11 +10,18 @@ process DIAMOND_BLASTX {
input:
tuple val(meta), path(fasta)
path db
val outext
val out_ext
val blast_columns
output:
tuple val(meta), path('*.{blast,xml,txt,daa,sam,tsv,paf}'), emit: output
path "versions.yml" , emit: versions
tuple val(meta), path('*.blast'), optional: true, emit: blast
tuple val(meta), path('*.xml') , optional: true, emit: xml
tuple val(meta), path('*.txt') , optional: true, emit: txt
tuple val(meta), path('*.daa') , optional: true, emit: daa
tuple val(meta), path('*.sam') , optional: true, emit: sam
tuple val(meta), path('*.tsv') , optional: true, emit: tsv
tuple val(meta), path('*.paf') , optional: true, emit: paf
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
@ -22,7 +29,8 @@ process DIAMOND_BLASTX {
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
switch ( outext ) {
def columns = blast_columns ? "${blast_columns}" : ''
switch ( out_ext ) {
case "blast": outfmt = 0; break
case "xml": outfmt = 5; break
case "txt": outfmt = 6; break
@ -30,6 +38,11 @@ process DIAMOND_BLASTX {
case "sam": outfmt = 101; break
case "tsv": outfmt = 102; break
case "paf": outfmt = 103; break
default:
log.warn("Unknown output file format provided (${out_ext}): selecting DIAMOND default of tabular BLAST output (txt)");
outfmt = '6';
out_ext = 'txt';
break
}
"""
DB=`find -L ./ -name "*.dmnd" | sed 's/.dmnd//'`
@ -39,9 +52,9 @@ process DIAMOND_BLASTX {
--threads $task.cpus \\
--db \$DB \\
--query $fasta \\
--outfmt ${outfmt} \\
--outfmt ${outfmt} ${columns} \\
$args \\
--out ${prefix}.${outext}
--out ${prefix}.${out_ext}
cat <<-END_VERSIONS > versions.yml
"${task.process}":


@ -28,7 +28,7 @@ input:
type: directory
description: Directory containing the nucleotide blast database
pattern: "*"
- outext:
- out_ext:
type: string
description: |
Specify the type of output file to be generated. `blast` corresponds to
@ -38,10 +38,34 @@ input:
pattern: "blast|xml|txt|daa|sam|tsv|paf"
output:
- blast:
type: file
description: File containing blastx hits
pattern: "*.{blast}"
- xml:
type: file
description: File containing blastx hits
pattern: "*.{xml}"
- txt:
type: file
description: File containing blastx hits
pattern: "*.{blastx.txt}"
description: File containing hits in tabular BLAST format.
pattern: "*.{txt}"
- daa:
type: file
description: File containing hits in DAA format
pattern: "*.{daa}"
- sam:
type: file
description: File containing aligned reads in SAM format
pattern: "*.{sam}"
- tsv:
type: file
description: Tab separated file containing taxonomic classification of hits
pattern: "*.{tsv}"
- paf:
type: file
description: File containing aligned reads in pairwise mapping format (PAF)
pattern: "*.{paf}"
- versions:
type: file
description: File containing software versions


@ -12,7 +12,7 @@ process GATK4_MARKDUPLICATES {
output:
tuple val(meta), path("*.bam") , emit: bam
tuple val(meta), path("*.bai") , emit: bai
tuple val(meta), path("*.bai") , optional:true, emit: bai
tuple val(meta), path("*.metrics"), emit: metrics
path "versions.yml" , emit: versions


@ -43,4 +43,15 @@ process GATK4_MERGEBAMALIGNMENT {
gatk4: \$(echo \$(gatk --version 2>&1) | sed 's/^.*(GATK) v//; s/ .*\$//')
END_VERSIONS
"""
stub:
def prefix = task.ext.prefix ?: "${meta.id}"
"""
touch ${prefix}.bam
cat <<-END_VERSIONS > versions.yml
"${task.process}":
gatk4: \$(echo \$(gatk --version 2>&1) | sed 's/^.*(GATK) v//; s/ .*\$//')
END_VERSIONS
"""
}
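Stub blocks like the one above only execute when Nextflow is started with -stub-run; the matching test added later in this commit drives it roughly like this:

nextflow run ./tests/modules/gatk4/mergebamalignment \
    -entry test_gatk4_mergebamalignment_stubs \
    -c ./tests/config/nextflow.config \
    -c ./tests/modules/gatk4/mergebamalignment/nextflow.config \
    -stub-run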


@ -57,4 +57,18 @@ process GATK4_MUTECT2 {
gatk4: \$(echo \$(gatk --version 2>&1) | sed 's/^.*(GATK) v//; s/ .*\$//')
END_VERSIONS
"""
stub:
def prefix = task.ext.prefix ?: "${meta.id}"
"""
touch ${prefix}.vcf.gz
touch ${prefix}.vcf.gz.tbi
touch ${prefix}.vcf.gz.stats
touch ${prefix}.f1r2.tar.gz
cat <<-END_VERSIONS > versions.yml
"${task.process}":
gatk4: \$(echo \$(gatk --version 2>&1) | sed 's/^.*(GATK) v//; s/ .*\$//')
END_VERSIONS
"""
}


@ -39,4 +39,15 @@ process GATK4_REVERTSAM {
gatk4: \$(echo \$(gatk --version 2>&1) | sed 's/^.*(GATK) v//; s/ .*\$//')
END_VERSIONS
"""
stub:
def prefix = task.ext.prefix ?: "${meta.id}"
"""
touch ${prefix}.reverted.bam
cat <<-END_VERSIONS > versions.yml
"${task.process}":
gatk4: \$(echo \$(gatk --version 2>&1) | sed 's/^.*(GATK) v//; s/ .*\$//')
END_VERSIONS
"""
}


@ -40,4 +40,17 @@ process GATK4_SAMTOFASTQ {
gatk4: \$(echo \$(gatk --version 2>&1) | sed 's/^.*(GATK) v//; s/ .*\$//')
END_VERSIONS
"""
stub:
def prefix = task.ext.prefix ?: "${meta.id}"
"""
touch ${prefix}.fastq.gz
touch ${prefix}_1.fastq.gz
touch ${prefix}_2.fastq.gz
cat <<-END_VERSIONS > versions.yml
"${task.process}":
gatk4: \$(echo \$(gatk --version 2>&1) | sed 's/^.*(GATK) v//; s/ .*\$//')
END_VERSIONS
"""
}


@ -23,7 +23,7 @@ process METAPHLAN3 {
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def input_type = ("$input".endsWith(".fastq.gz")) ? "--input_type fastq" : ("$input".contains(".fasta")) ? "--input_type fasta" : ("$input".endsWith(".bowtie2out.txt")) ? "--input_type bowtie2out" : "--input_type sam"
def input_type = ("$input".endsWith(".fastq.gz") || "$input".endsWith(".fq.gz")) ? "--input_type fastq" : ("$input".contains(".fasta")) ? "--input_type fasta" : ("$input".endsWith(".bowtie2out.txt")) ? "--input_type bowtie2out" : "--input_type sam"
def input_data = ("$input_type".contains("fastq")) && !meta.single_end ? "${input[0]},${input[1]}" : "$input"
def bowtie2_out = "$input_type" == "--input_type bowtie2out" || "$input_type" == "--input_type sam" ? '' : "--bowtie2out ${prefix}.bowtie2out.txt"
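To make the widened extension check concrete: a paired-end sample whose reads end in .fq.gz is now routed to --input_type fastq (previously it fell through to --input_type sam), so the resolved call looks roughly like this (file names are placeholders):

metaphlan sample_1.fq.gz,sample_2.fq.gz \
    --input_type fastq \
    --bowtie2out sample.bowtie2out.txt \
    -o sample_profile.txt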


@ -41,4 +41,16 @@ process SAMTOOLS_VIEW {
samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
END_VERSIONS
"""
stub:
def prefix = task.ext.prefix ?: "${meta.id}"
"""
touch ${prefix}.bam
touch ${prefix}.cram
cat <<-END_VERSIONS > versions.yml
"${task.process}":
samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
END_VERSIONS
"""
}


@ -0,0 +1,64 @@
process SHIGATYPER {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::shigatyper=2.0.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/shigatyper%3A2.0.1--pyhdfd78af_0':
'quay.io/biocontainers/shigatyper:2.0.1--pyhdfd78af_0' }"
input:
tuple val(meta), path(reads)
output:
tuple val(meta), path("${prefix}.tsv") , emit: tsv
tuple val(meta), path("${prefix}-hits.tsv"), optional: true, emit: hits
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
prefix = task.ext.prefix ?: "${meta.id}"
if (meta.is_ont) {
"""
shigatyper \\
$args \\
--SE $reads \\
--ont \\
--name $prefix
cat <<-END_VERSIONS > versions.yml
"${task.process}":
shigatyper: \$(echo \$(shigatyper --version 2>&1) | sed 's/^.*ShigaTyper //' )
END_VERSIONS
"""
} else if (meta.single_end) {
"""
shigatyper \\
$args \\
--SE $reads \\
--name $prefix
cat <<-END_VERSIONS > versions.yml
"${task.process}":
shigatyper: \$(echo \$(shigatyper --version 2>&1) | sed 's/^.*ShigaTyper //' )
END_VERSIONS
"""
} else {
"""
shigatyper \\
$args \\
--R1 ${reads[0]} \\
--R2 ${reads[1]} \\
--name $prefix
cat <<-END_VERSIONS > versions.yml
"${task.process}":
shigatyper: \$(echo \$(shigatyper --version 2>&1) | sed 's/^.*ShigaTyper //' )
END_VERSIONS
"""
}
}


@ -0,0 +1,47 @@
name: "shigatyper"
description: Determine Shigella serotype from Illumina or Oxford Nanopore reads
keywords:
- fastq
- shigella
- serotype
tools:
- "shigatyper":
description: "Typing tool for Shigella spp. from WGS Illumina sequencing"
homepage: "https://github.com/CFSAN-Biostatistics/shigatyper"
documentation: "https://github.com/CFSAN-Biostatistics/shigatyper"
tool_dev_url: "https://github.com/CFSAN-Biostatistics/shigatyper"
doi: "10.1128/AEM.00165-19"
licence: "['Public Domain']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false, is_ont:false ]
- reads:
type: file
description: Illumina or Nanopore FASTQ file
pattern: "*.fastq.gz"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- tsv:
type: file
description: A TSV formatted file with ShigaTyper results
pattern: "*.tsv"
- hits:
type: file
description: A TSV formatted file with individual gene hits
pattern: "*-hits.tsv"
authors:
- "@rpetit3"

modules/slimfastq/main.nf (new file)

@ -0,0 +1,52 @@
def VERSION = '2.04' // Tool version is hard-coded here and reported in versions.yml below
process SLIMFASTQ {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::slimfastq=2.04" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/slimfastq:2.04--h87f3376_2':
'quay.io/biocontainers/slimfastq:2.04--h87f3376_2' }"
input:
tuple val(meta), path(fastq)
output:
tuple val(meta), path("*.sfq"), emit: sfq
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
if (meta.single_end) {
"""
gzip -d -c '${fastq}' | slimfastq \\
$args \\
-f '${prefix}.sfq'
cat <<-END_VERSIONS > versions.yml
"${task.process}":
slimfastq: ${VERSION}
END_VERSIONS
"""
} else {
"""
gzip -d -c '${fastq[0]}' | slimfastq \\
$args \\
-f '${prefix}_1.sfq'
gzip -d -c '${fastq[1]}' | slimfastq \\
$args \\
-f '${prefix}_2.sfq'
cat <<-END_VERSIONS > versions.yml
"${task.process}":
slimfastq: ${VERSION}
END_VERSIONS
"""
}
}


@ -0,0 +1,41 @@
name: "slimfastq"
description: Fast, efficient, lossless compression of FASTQ files.
keywords:
- FASTQ
- compression
- lossless
tools:
- "slimfastq":
description: "slimfastq efficiently compresses/decompresses FASTQ files"
homepage: "https://github.com/Infinidat/slimfastq"
tool_dev_url: "https://github.com/Infinidat/slimfastq"
licence: "['BSD-3-clause']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- fastq:
type: file
description: Either a single-end FASTQ file or paired-end files.
pattern: "*.{fq.gz,fastq.gz}"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- sfq:
type: file
description: Either one or two sequence files in slimfastq compressed format.
pattern: "*.{sfq}"
authors:
- "@Midnighter"


@ -0,0 +1,47 @@
process SRST2_SRST2 {
tag "${meta.id}"
label 'process_low'
conda (params.enable_conda ? "bioconda::srst2=0.2.0" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/srst2%3A0.2.0--py27_2':
'quay.io/biocontainers/srst2:0.2.0--py27_2'}"
input:
tuple val(meta), path(fastq_s), path(db)
output:
tuple val(meta), path("*_genes_*_results.txt") , optional:true, emit: gene_results
tuple val(meta), path("*_fullgenes_*_results.txt") , optional:true, emit: fullgene_results
tuple val(meta), path("*_mlst_*_results.txt") , optional:true, emit: mlst_results
tuple val(meta), path("*.pileup") , emit: pileup
tuple val(meta), path("*.sorted.bam") , emit: sorted_bam
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ""
def prefix = task.ext.prefix ?: "${meta.id}"
def read_s = meta.single_end ? "--input_se ${fastq_s}" : "--input_pe ${fastq_s[0]} ${fastq_s[1]}"
if (meta.db=="gene") {
database = "--gene_db ${db}"
} else if (meta.db=="mlst") {
database = "--mlst_db ${db}"
} else {
error "Please set meta.db to either \"gene\" or \"mlst\""
}
"""
srst2 \\
${read_s} \\
--threads $task.cpus \\
--output ${prefix} \\
${database} \\
$args
cat <<-END_VERSIONS > versions.yml
"${task.process}":
srst2: \$(echo \$(srst2 --version 2>&1) | sed 's/srst2 //')
END_VERSIONS
"""
}
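For orientation, with meta.db set to 'gene' and paired-end reads the module issues a command along these lines (file names and CPU count are placeholders; the database file is the one used by the tests below):

srst2 \
    --input_pe sample_1.fastq.gz sample_2.fastq.gz \
    --threads 2 \
    --output sample \
    --gene_db resFinder_20180221_srst2.fasta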


@ -0,0 +1,72 @@
name: srst2_srst2
description: |
Short Read Sequence Typing for Bacterial Pathogens is a program designed to take Illumina sequence data,
a MLST database and/or a database of gene sequences (e.g. resistance genes, virulence genes, etc)
and report the presence of STs and/or reference genes.
keywords:
- mlst
- typing
- illumina
tools:
- srst2:
description: "Short Read Sequence Typing for Bacterial Pathogens"
homepage: "http://katholt.github.io/srst2/"
documentation: "https://github.com/katholt/srst2/blob/master/README.md"
tool_dev_url: "https://github.com/katholt/srst2"
doi: "10.1186/s13073-014-0090-6"
licence: ["BSD"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
id: should be the identification number or sample name
single_end: should be true for single-end data and false for paired-end data
db: should be either 'gene' to use the --gene_db option or "mlst" to use the --mlst_db option
e.g. [ id:'sample', single_end:false , db:'gene']
- fastq_s:
type: file
description: |
gzipped FASTQ file(s). If the files are NOT in
MiSeq format (sample_S1_L001_R1_001.fastq.gz), use the --forward and --reverse parameters; otherwise
the default read suffix is _1, i.e. forward reads are expected as sample_1.fastq.gz.
pattern: "*.fastq.gz"
- db:
type: file
description: Database in FASTA format
pattern: "*.fasta"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'sample', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- fullgene_results:
type: file
description: A detailed report, with one row per gene per sample, as described at github.com/katholt/srst2#gene-typing
pattern: "*_fullgenes_*_results.txt"
- gene_results:
type: file
description: A tabulated summary report of samples x genes.
pattern: "*_genes_*_results.txt"
- mlst_results:
type: file
description: A tabulated summary report of mlst subtyping.
pattern: "*_mlst_*_results.txt"
- bam:
type: file
description: Sorted BAM file
pattern: "*.sorted.bam"
- pileup:
type: file
description: SAMtools pileup file
pattern: "*.pileup"
authors:
- "@jvhagey"


@ -0,0 +1,50 @@
def VERSION = '1.8.3' // VarDict-Java version is hard-coded here and reported in versions.yml below
process VARDICTJAVA {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::vardict-java=1.8.3" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/vardict-java:1.8.3--hdfd78af_0':
'quay.io/biocontainers/vardict-java:1.8.3--hdfd78af_0' }"
input:
tuple val(meta), path(bam), path(bai)
path(bed)
tuple path(fasta), path(fasta_fai)
output:
tuple val(meta), path("*.vcf.gz"), emit: vcf
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def args2 = task.ext.args2 ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
vardict-java \\
$args \\
-c 1 -S 2 -E 3 \\
-b $bam \\
-th $task.cpus \\
-N $prefix \\
-G $fasta \\
$bed \\
| teststrandbias.R \\
| var2vcf_valid.pl \\
$args2 \\
-N $prefix \\
| gzip -c > ${prefix}.vcf.gz
cat <<-END_VERSIONS > versions.yml
"${task.process}":
vardict-java: $VERSION
var2vcf_valid.pl: \$(echo \$(var2vcf_valid.pl -h | sed -n 2p | awk '{ print \$2 }'))
END_VERSIONS
"""
}
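Expanded for a single sample, the piped command above renders roughly as follows (sample and reference names are placeholders):

vardict-java \
    -c 1 -S 2 -E 3 \
    -b test.bam \
    -th 4 \
    -N test \
    -G genome.fasta \
    regions.bed \
    | teststrandbias.R \
    | var2vcf_valid.pl -N test \
    | gzip -c > test.vcf.gz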


@ -0,0 +1,60 @@
name: "vardictjava"
description: The Java port of the VarDict variant caller
keywords:
- variant calling
- VarDict
- AstraZeneca
tools:
- "vardictjava":
description: "Java port of the VarDict variant discovery program"
homepage: "https://github.com/AstraZeneca-NGS/VarDictJava"
documentation: "https://github.com/AstraZeneca-NGS/VarDictJava"
tool_dev_url: "https://github.com/AstraZeneca-NGS/VarDictJava"
doi: "10.1093/nar/gkw227 "
licence: "['MIT']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- bam:
type: file
description: BAM/SAM file
pattern: "*.{bam,sam}"
- bai:
type: file
description: Index of the BAM file
pattern: "*.bai"
- fasta:
type: file
description: FASTA of the reference genome
pattern: "*.{fa,fasta}"
- fasta_fai:
type: file
description: The index of the FASTA of the reference genome
pattern: "*.fai"
- bed:
type: file
description: BED with the regions of interest
pattern: "*.bed"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- vcf:
type: file
description: VCF file output
pattern: "*.vcf.gz"
authors:
- "@nvnieuwk"


@ -1719,6 +1719,10 @@ seqwish/induce:
- modules/seqwish/induce/**
- tests/modules/seqwish/induce/**
shigatyper:
- modules/shigatyper/**
- tests/modules/shigatyper/**
shovill:
- modules/shovill/**
- tests/modules/shovill/**
@ -1727,6 +1731,10 @@ sistr:
- modules/sistr/**
- tests/modules/sistr/**
slimfastq:
- modules/slimfastq/**
- tests/modules/slimfastq/**
snapaligner/index:
- modules/snapaligner/index/**
- tests/modules/snapaligner/index/**
@ -1775,6 +1783,10 @@ sratools/prefetch:
- modules/sratools/prefetch/**
- tests/modules/sratools/prefetch/**
srst2/srst2:
- modules/srst2/srst2/**
- tests/modules/srst2/srst2/**
ssuissero:
- modules/ssuissero/**
- tests/modules/ssuissero/**
@ -1916,6 +1928,10 @@ unzip:
- modules/unzip/**
- tests/modules/unzip/**
vardictjava:
- modules/vardictjava/**
- tests/modules/vardictjava/**
variantbam:
- modules/variantbam/**
- tests/modules/variantbam/**


@ -1,8 +1,8 @@
- name: antismash antismashlitedownloaddatabases test_antismash_antismashlitedownloaddatabases
command: nextflow run tests/modules/antismash/antismashlitedownloaddatabases -entry test_antismash_antismashlitedownloaddatabases -c tests/config/nextflow.config
tags:
- antismash
- antismash/antismashlitedownloaddatabases
- antismash
files:
- path: output/antismash/versions.yml
md5sum: 24859c67023abab99de295d3675a24b6
@ -12,6 +12,5 @@
- path: output/antismash/antismash_db/pfam
- path: output/antismash/antismash_db/resfam
- path: output/antismash/antismash_db/tigrfam
- path: output/antismash/css
- path: output/antismash/detection
- path: output/antismash/modules
- path: output/antismash/antismash_dir
- path: output/antismash/antismash_dir/detection/hmm_detection/data/bgc_seeds.hmm


@ -9,18 +9,20 @@ workflow test_diamond_blastp {
db = [ file(params.test_data['sarscov2']['genome']['proteome_fasta'], checkIfExists: true) ]
fasta = [ file(params.test_data['sarscov2']['genome']['proteome_fasta'], checkIfExists: true) ]
outext = 'txt'
out_ext = 'txt'
blast_columns = 'qseqid qlen'
DIAMOND_MAKEDB ( db )
DIAMOND_BLASTP ( [ [id:'test'], fasta ], DIAMOND_MAKEDB.out.db, outext )
DIAMOND_BLASTP ( [ [id:'test'], fasta ], DIAMOND_MAKEDB.out.db, out_ext, blast_columns )
}
workflow test_diamond_blastp_daa {
db = [ file(params.test_data['sarscov2']['genome']['proteome_fasta'], checkIfExists: true) ]
fasta = [ file(params.test_data['sarscov2']['genome']['proteome_fasta'], checkIfExists: true) ]
outext = 'daa'
out_ext = 'daa'
blast_columns = []
DIAMOND_MAKEDB ( db )
DIAMOND_BLASTP ( [ [id:'test'], fasta ], DIAMOND_MAKEDB.out.db, outext )
DIAMOND_BLASTP ( [ [id:'test'], fasta ], DIAMOND_MAKEDB.out.db, out_ext, blast_columns )
}


@ -5,7 +5,6 @@
- diamond
files:
- path: output/diamond/test.diamond_blastp.txt
md5sum: 2515cf88590afa32356497e79a51fce9
- path: output/diamond/versions.yml
- name: diamond blastp test_diamond_blastp_daa
@ -15,5 +14,4 @@
- diamond
files:
- path: output/diamond/test.diamond_blastp.daa
md5sum: 0b539c68a5b66dd6e20ad5d218f4f4c6
- path: output/diamond/versions.yml


@ -9,18 +9,20 @@ workflow test_diamond_blastx {
db = [ file(params.test_data['sarscov2']['genome']['proteome_fasta'], checkIfExists: true) ]
fasta = [ file(params.test_data['sarscov2']['genome']['transcriptome_fasta'], checkIfExists: true) ]
outext = 'txt'
out_ext = 'tfdfdt' // Nonsense file extension to check default case.
blast_columns = 'qseqid qlen'
DIAMOND_MAKEDB ( db )
DIAMOND_BLASTX ( [ [id:'test'], fasta ], DIAMOND_MAKEDB.out.db, outext )
DIAMOND_BLASTX ( [ [id:'test'], fasta ], DIAMOND_MAKEDB.out.db, out_ext, blast_columns )
}
workflow test_diamond_blastx_daa {
db = [ file(params.test_data['sarscov2']['genome']['proteome_fasta'], checkIfExists: true) ]
fasta = [ file(params.test_data['sarscov2']['genome']['transcriptome_fasta'], checkIfExists: true) ]
outext = 'daa'
out_ext = 'daa'
blast_columns = []
DIAMOND_MAKEDB ( db )
DIAMOND_BLASTX ( [ [id:'test'], fasta ], DIAMOND_MAKEDB.out.db, outext )
DIAMOND_BLASTX ( [ [id:'test'], fasta ], DIAMOND_MAKEDB.out.db, out_ext, blast_columns )
}
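Because test_diamond_blastx deliberately passes the nonsense extension 'tfdfdt', the new default branch of the switch is exercised: a warning is logged, outfmt falls back to 6 and the output suffix becomes .txt. The effective call is roughly the following (names are placeholders):

diamond blastx \
    --db ./proteome \
    --query transcriptome.fasta \
    --outfmt 6 qseqid qlen \
    --out test.txt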


@ -5,7 +5,6 @@
- diamond/blastx
files:
- path: output/diamond/test.diamond_blastx.txt
md5sum: eb2aebfa1cb42fcb2121c65528663307
- path: output/diamond/versions.yml
- name: diamond blastx test_diamond_blastx_daa


@ -14,3 +14,14 @@ workflow test_gatk4_mergebamalignment {
GATK4_MERGEBAMALIGNMENT ( input, fasta, dict )
}
workflow test_gatk4_mergebamalignment_stubs {
input = [ [ id:'test' ], // meta map
"test_foo.bam",
"test_bar.bam"
]
fasta = "genome.fasta"
dict = "genome.fasta.dict"
GATK4_MERGEBAMALIGNMENT ( input, fasta, dict )
}


@ -7,3 +7,12 @@
- path: output/gatk4/test.bam
md5sum: e6f1b343700b7ccb94e81ae127433988
- path: output/gatk4/versions.yml
- name: gatk4 mergebamalignment test_gatk4_mergebamalignment_stubs
command: nextflow run ./tests/modules/gatk4/mergebamalignment -entry test_gatk4_mergebamalignment_stubs -c ./tests/config/nextflow.config -c ./tests/modules/gatk4/mergebamalignment/nextflow.config -stub-run
tags:
- gatk4
- gatk4/mergebamalignment
files:
- path: output/gatk4/test.bam
- path: output/gatk4/versions.yml


@ -118,3 +118,25 @@ workflow test_gatk4_mutect2_mitochondria {
GATK4_MUTECT2_MITO ( input, fasta, fai, dict, [], [], [], [] )
}
workflow test_gatk4_mutect2_tumor_normal_pair_f1r2_stubs {
input = [ [ id:'test', normal_id:'normal', tumor_id:'tumour' ], // meta map
[ "foo_paired.bam",
"foo_paired2.bam"
],
[ "foo_paired.bam.bai",
"foo_paired2.bam.bai"
],
[]
]
fasta = "genome.fasta"
fai = "genome.fasta.fai"
dict = "genome.fasta.dict"
germline_resource = "genome_gnomAD.r2.1.1.vcf.gz"
germline_resource_tbi = "genome_gnomAD.r2.1.1.vcf.gz.tbi"
panel_of_normals = "genome_mills_and_1000G.indels.hg38.vcf.gz"
panel_of_normals_tbi = "genome_mills_and_1000G.indels.hg38.vcf.gz.tbi"
GATK4_MUTECT2_F1R2 ( input, fasta, fai, dict, germline_resource, germline_resource_tbi, panel_of_normals, panel_of_normals_tbi )
}


@ -69,3 +69,15 @@
md5sum: fc6ea14ca2da346babe78161beea28c9
- path: output/gatk4/test.vcf.gz.tbi
- path: output/gatk4/versions.yml
- name: gatk4 mutect2 test_gatk4_mutect2_tumor_normal_pair_f1r2_stubs
command: nextflow run ./tests/modules/gatk4/mutect2 -entry test_gatk4_mutect2_tumor_normal_pair_f1r2_stubs -c ./tests/config/nextflow.config -c ./tests/modules/gatk4/mutect2/nextflow.config -stub-run
tags:
- gatk4
- gatk4/mutect2
files:
- path: output/gatk4/test.f1r2.tar.gz
- path: output/gatk4/test.vcf.gz
- path: output/gatk4/test.vcf.gz.stats
- path: output/gatk4/test.vcf.gz.tbi
- path: output/gatk4/versions.yml


@ -11,3 +11,11 @@ workflow test_gatk4_revertsam {
GATK4_REVERTSAM ( input )
}
workflow test_gatk4_revertsam_stubs {
input = [ [ id:'test' ], // meta map
"foo_paired_end.bam"
]
GATK4_REVERTSAM ( input )
}


@ -7,3 +7,12 @@
- path: output/gatk4/test.reverted.bam
md5sum: f783a88deb45c3a2c20ca12cbe1c5652
- path: output/gatk4/versions.yml
- name: gatk4 revertsam test_gatk4_revertsam_stubs
command: nextflow run ./tests/modules/gatk4/revertsam -entry test_gatk4_revertsam_stubs -c ./tests/config/nextflow.config -c ./tests/modules/gatk4/revertsam/nextflow.config -stub-run
tags:
- gatk4
- gatk4/revertsam
files:
- path: output/gatk4/test.reverted.bam
- path: output/gatk4/versions.yml


@ -19,3 +19,11 @@ workflow test_gatk4_samtofastq_paired_end {
GATK4_SAMTOFASTQ ( input )
}
workflow test_gatk4_samtofastq_paired_end_stubs {
input = [ [ id:'test', single_end: false ], // meta map
[ "foo_paired_end.bam" ]
]
GATK4_SAMTOFASTQ ( input )
}


@ -19,3 +19,13 @@
- path: output/gatk4/test_2.fastq.gz
md5sum: 613bf64c023609e1c62ad6ce9e4be8d7
- path: output/gatk4/versions.yml
- name: gatk4 samtofastq test_gatk4_samtofastq_paired_end_stubs
command: nextflow run ./tests/modules/gatk4/samtofastq -entry test_gatk4_samtofastq_paired_end_stubs -c ./tests/config/nextflow.config -c ./tests/modules/gatk4/samtofastq/nextflow.config -stub-run
tags:
- gatk4
- gatk4/samtofastq
files:
- path: output/gatk4/test_1.fastq.gz
- path: output/gatk4/test_2.fastq.gz
- path: output/gatk4/versions.yml


@ -22,3 +22,12 @@ workflow test_samtools_view_cram {
SAMTOOLS_VIEW ( input, fasta )
}
workflow test_samtools_view_stubs {
input = [ [ id:'test', single_end:false ], // meta map
"foo_paired_end.bam",
[]
]
SAMTOOLS_VIEW ( input, [] )
}


@ -14,3 +14,11 @@
- samtools
files:
- path: output/samtools/test.cram
- name: samtools view test_samtools_view_stubs
command: nextflow run ./tests/modules/samtools/view -entry test_samtools_view_stubs -c ./tests/config/nextflow.config -c ./tests/modules/samtools/view/nextflow.config -stub-run
tags:
- samtools/view
- samtools
files:
- path: output/samtools/test.bam


@ -0,0 +1,36 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { SHIGATYPER } from '../../../modules/shigatyper/main.nf'
workflow test_shigatyper_pe {
input = [
[ id:'test', single_end:false, is_ont:false ], // meta map
[ file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_2_fastq_gz'], checkIfExists: true) ]
]
SHIGATYPER ( input )
}
workflow test_shigatyper_se {
input = [
[ id:'test', single_end:true, is_ont:false ], // meta map
[ file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true) ]
]
SHIGATYPER ( input )
}
workflow test_shigatyper_ont {
input = [
[ id:'test', single_end:true, is_ont:true ], // meta map
[ file(params.test_data['sarscov2']['nanopore']['test_fastq_gz'], checkIfExists: true) ]
]
SHIGATYPER ( input )
}


@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}


@ -0,0 +1,29 @@
- name: shigatyper test_shigatyper_pe
command: nextflow run tests/modules/shigatyper -entry test_shigatyper_pe -c tests/config/nextflow.config -c tests/modules/shigatyper/nextflow.config
tags:
- shigatyper
files:
- path: output/shigatyper/test.tsv
md5sum: 4f7d38c956993800546b9acb9881d717
- path: output/shigatyper/versions.yml
md5sum: d8ca45ed88dfba9bc570c01e4b49773b
- name: shigatyper test_shigatyper_se
command: nextflow run tests/modules/shigatyper -entry test_shigatyper_se -c tests/config/nextflow.config -c tests/modules/shigatyper/nextflow.config
tags:
- shigatyper
files:
- path: output/shigatyper/test.tsv
md5sum: 4f7d38c956993800546b9acb9881d717
- path: output/shigatyper/versions.yml
md5sum: 8bbf165da5a5df3b7771a33aad197eec
- name: shigatyper test_shigatyper_ont
command: nextflow run tests/modules/shigatyper -entry test_shigatyper_ont -c tests/config/nextflow.config -c tests/modules/shigatyper/nextflow.config
tags:
- shigatyper
files:
- path: output/shigatyper/test.tsv
md5sum: 4f7d38c956993800546b9acb9881d717
- path: output/shigatyper/versions.yml
md5sum: 0da333e1178e9e7e84a9116ad5a5ff71


@ -0,0 +1,46 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { SLIMFASTQ } from '../../../modules/slimfastq/main.nf'
workflow test_slimfastq_single_end {
input = [
[ id:'test', single_end:true ], // meta map
file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true)
]
SLIMFASTQ ( input )
}
workflow test_slimfastq_paired_end {
input = [
[ id:'test', single_end:false ], // meta map
[ file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_2_fastq_gz'], checkIfExists: true)]
]
SLIMFASTQ ( input )
}
workflow test_slimfastq_nanopore {
input = [
[ id:'test', single_end:true ], // meta map
file(params.test_data['sarscov2']['nanopore']['test_fastq_gz'], checkIfExists: true)
]
SLIMFASTQ ( input )
}
workflow test_slimfastq_pacbio {
input = [
[ id:'test', single_end:true ], // meta map
file(params.test_data['homo_sapiens']['pacbio']['ccs_fq_gz'], checkIfExists: true)
]
SLIMFASTQ ( input )
}


@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}


@ -0,0 +1,41 @@
- name: slimfastq test_slimfastq_single_end
command: nextflow run tests/modules/slimfastq -entry test_slimfastq_single_end -c tests/config/nextflow.config
tags:
- slimfastq
files:
- path: output/slimfastq/test.sfq
md5sum: 6a942eeca6c99ee9a9a0cedab5d246f1
- path: output/slimfastq/versions.yml
md5sum: f52351f5c9e6259af02745c8eae5c780
- name: slimfastq test_slimfastq_paired_end
command: nextflow run tests/modules/slimfastq -entry test_slimfastq_paired_end -c tests/config/nextflow.config
tags:
- slimfastq
files:
- path: output/slimfastq/test_1.sfq
md5sum: 6a942eeca6c99ee9a9a0cedab5d246f1
- path: output/slimfastq/test_2.sfq
md5sum: 0d2c60b52a39f7c2cb7843e848d90afd
- path: output/slimfastq/versions.yml
md5sum: 6239853705877651a4851c4cb6d62da4
- name: slimfastq test_slimfastq_nanopore
command: nextflow run tests/modules/slimfastq -entry test_slimfastq_nanopore -c tests/config/nextflow.config
tags:
- slimfastq
files:
- path: output/slimfastq/test.sfq
md5sum: e17f14d64d3a75356b03ff2f9e8881f7
- path: output/slimfastq/versions.yml
md5sum: 33153f1103482a2bd35cb2f4c337c5e8
- name: slimfastq test_slimfastq_pacbio
command: nextflow run tests/modules/slimfastq -entry test_slimfastq_pacbio -c tests/config/nextflow.config
tags:
- slimfastq
files:
- path: output/slimfastq/test.sfq
md5sum: 9e8389e47e6ddf8c25e92412dd628339
- path: output/slimfastq/versions.yml
md5sum: 1982789c3d5c7de37c0a9351e4ae63f7


@ -0,0 +1,53 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { SRST2_SRST2 } from '../../../../modules/srst2/srst2/main.nf'
workflow test_srst2_srst2_exit {
input = [
[ id:'test', single_end:false, db:"test"], // meta map
[ file(params.test_data['bacteroides_fragilis']['illumina']['test1_1_fastq_gz'], checkIfExists: true),
file(params.test_data['bacteroides_fragilis']['illumina']['test1_2_fastq_gz'], checkIfExists: true) ],
// [("")]
file('https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/delete_me/srst2/resFinder_20180221_srst2.fasta')
]
SRST2_SRST2(input)
}
workflow test_srst2_srst2_mlst {
input = [
[ id:'test', single_end:false, db:"mlst"], // meta map
[ file("https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/delete_me/srst2/SRR9067271_1.fastq.gz", checkIfExists: true),
file("https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/delete_me/srst2/SRR9067271_2.fastq.gz", checkIfExists: true) ],
file('https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/delete_me/srst2/MLST_DB.fas')
]
SRST2_SRST2(input)
}
workflow test_srst2_srst2_paired_end {
input = [
[ id:'test', single_end:false, db:"gene"], // meta map
[ file(params.test_data['bacteroides_fragilis']['illumina']['test1_1_fastq_gz'], checkIfExists: true),
file(params.test_data['bacteroides_fragilis']['illumina']['test1_2_fastq_gz'], checkIfExists: true) ],
file('https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/delete_me/srst2/resFinder_20180221_srst2.fasta') // Change to params.test_data syntax after the data is included in tests/config/test_data.config
]
SRST2_SRST2(input)
}
workflow test_srst2_srst2_single_end {
input = [
[ id:'test', single_end:true, db:"gene" ], // meta map
file(params.test_data['bacteroides_fragilis']['illumina']['test1_1_fastq_gz'], checkIfExists: true),
file('https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/delete_me/srst2/resFinder_20180221_srst2.fasta') // Change to params.test_data syntax after the data is included in tests/config/test_data.config
]
SRST2_SRST2(input)
}


@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}


@ -0,0 +1,51 @@
- name: srst2 srst2 test_srst2_srst2_exit # Tests that the pipeline exits when meta.db is not set to a supported value
command: nextflow run tests/modules/srst2/srst2 -entry test_srst2_srst2_exit -c tests/config/nextflow.config
tags:
- srst2/srst2
- srst2
exit_code: 1
- name: srst2 srst2 test_srst2_srst2_mlst
command: nextflow run tests/modules/srst2/srst2 -entry test_srst2_srst2_mlst -c tests/config/nextflow.config
tags:
- srst2/srst2
- srst2
files:
- path: output/srst2/test__SRR9067271.MLST_DB.pileup
contains:
- "dnaJ-1 2 C 17 .........,....... FFFFFFFFFFFFFFFFF"
- path: output/srst2/test__SRR9067271.MLST_DB.sorted.bam
- path: output/srst2/test__mlst__MLST_DB__results.txt
md5sum: ec1b1f69933401d67c57f64cad11a098
- path: output/srst2/versions.yml
md5sum: a0c256a2fd3636069710b8ef22ee5ea7
- name: srst2 srst2 test_srst2_srst2_paired_end
command: nextflow run tests/modules/srst2/srst2 -entry test_srst2_srst2_paired_end -c tests/config/nextflow.config
tags:
- srst2/srst2
- srst2
files:
- path: output/srst2/test__genes__resFinder_20180221_srst2__results.txt
md5sum: 099aa6cacec5524b311f606debdfb3a9
- path: output/srst2/test__test1.resFinder_20180221_srst2.pileup
md5sum: 64b512ff495b828c456405ec7b676ad1
- path: output/srst2/test__test1.resFinder_20180221_srst2.sorted.bam
- path: output/srst2/versions.yml
md5sum: b446a70f1a2b4f60757829bcd744a214
- name: srst2 srst2 test_srst2_srst2_single_end
command: nextflow run tests/modules/srst2/srst2 -entry test_srst2_srst2_single_end -c tests/config/nextflow.config
tags:
- srst2/srst2
- srst2
files:
- path: output/srst2/test__fullgenes__resFinder_20180221_srst2__results.txt
md5sum: d0762ef8c38afd0e0a34cce52ed1a3db
- path: output/srst2/test__genes__resFinder_20180221_srst2__results.txt
md5sum: b8850c6644406d8b131e471ecc3f9013
- path: output/srst2/test__test1_1.resFinder_20180221_srst2.pileup
md5sum: 5f6279dc8124aa762a9dfe3d7a871277
- path: output/srst2/test__test1_1.resFinder_20180221_srst2.sorted.bam
- path: output/srst2/versions.yml
md5sum: 790fe00493c6634d17801a930073218b


@ -0,0 +1,23 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { VARDICTJAVA } from '../../../modules/vardictjava/main.nf'
workflow test_vardictjava {
bam_input_ch = Channel.value([
[ id:'test' ], // meta map
file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_bam'], checkIfExists: true),
file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_bam_bai'], checkIfExists: true)
])
bed = Channel.value(file(params.test_data['homo_sapiens']['genome']['genome_bed'], checkIfExists: true))
reference = Channel.value([
file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true),
file(params.test_data['homo_sapiens']['genome']['genome_fasta_fai'], checkIfExists: true)
])
VARDICTJAVA ( bam_input_ch, bed, reference )
}


@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}


@ -0,0 +1,9 @@
- name: vardictjava test_vardictjava
command: nextflow run tests/modules/vardictjava -entry test_vardictjava -c tests/config/nextflow.config
tags:
- vardictjava
files:
- path: output/vardictjava/test.vcf.gz
md5sum: 3f1f227afc532bddeb58f16fd3013fc8
- path: output/vardictjava/versions.yml
md5sum: 9b62c431a4f2680412b61c7071bdb1cd