Merge branch 'nf-core:master' into master

James A. Fellows Yates 2022-03-24 17:22:38 +01:00, committed by GitHub
commit 9b1e456754
43 changed files with 540 additions and 112 deletions

README.md

@ -133,7 +133,7 @@ We have written a helper command in the `nf-core/tools` package that uses the Gi
## Adding new modules
If you wish to contribute a new module, please see the documentation on the [nf-core website](https://nf-co.re/developers/adding_modules).
If you wish to contribute a new module, please see the documentation on the [nf-core website](https://nf-co.re/developers/modules#writing-a-new-module-reference).
> Please be kind to our code reviewers and submit one pull request per module :)
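As a quick reference, a new module is typically scaffolded with the helper from the `nf-core/tools` package mentioned above; a minimal sketch (the tool name `fastqc` is purely illustrative):

    nf-core modules create fastqc

Run from a clone of this repository, this should generate the `modules/<tool>/` and `tests/modules/<tool>/` stubs that the new modules in this diff follow.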

modules/adapterremovalfixprefix/main.nf

@ -0,0 +1,38 @@
def VERSION = '0.0.5' // the tool does not report its own version on the command line
process ADAPTERREMOVALFIXPREFIX {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::adapterremovalfixprefix=0.0.5" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/adapterremovalfixprefix:0.0.5--hdfd78af_2':
'quay.io/biocontainers/adapterremovalfixprefix:0.0.5--hdfd78af_2' }"
input:
tuple val(meta), path(fastq)
output:
tuple val(meta), path("*.fq.gz"), emit: fixed_fastq
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
if ("$fastq" == "${prefix}.fq.gz") error "Input and output names are the same, set prefix in module configuration to disambiguate!"
"""
AdapterRemovalFixPrefix \\
$fastq \\
$args \\
| gzip > ${prefix}.fq.gz
cat <<-END_VERSIONS > versions.yml
"${task.process}":
adapterremovalfixprefix: $VERSION
END_VERSIONS
"""
}

modules/adapterremovalfixprefix/meta.yml

@ -0,0 +1,43 @@
name: adapterremovalfixprefix
description: Fixes prefixes from AdapterRemoval2 output to make sure no clashing read names are in the output. For use with DeDup.
keywords:
- adapterremoval
- fastq
- dedup
tools:
- adapterremovalfixprefix:
description: Fixes adapter removal prefixes to make sure no clashing read names are in the output.
homepage: https://github.com/apeltzer/AdapterRemovalFixPrefix
documentation: None
tool_dev_url: https://github.com/apeltzer/AdapterRemovalFixPrefix
doi: "10.1186/s13059-016-0918-z"
licence: ["GPL v3"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- fastq:
type: file
description: FASTQ file from AdapterRemoval2
pattern: "*.{fq.gz,fastq.gz}"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- fixed_fastq:
type: file
description: FASTQ file with fixed read prefixes for DeDup
pattern: "*.{fq.gz}"
authors:
- "@jfy133"

modules/bracken/bracken/main.nf

@ -0,0 +1,41 @@
process BRACKEN_BRACKEN {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::bracken=2.6.2" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/bracken:2.6.2--py39hc16433a_0':
'quay.io/biocontainers/bracken:2.6.2--py39hc16433a_0' }"
input:
tuple val(meta), path(kraken_report)
path database
output:
tuple val(meta), path(bracken_report), emit: reports
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def threshold = meta.threshold == null ? 10 : meta.threshold // explicit null check so a threshold of 0 is not silently replaced by the default
def taxonomic_level = meta.taxonomic_level ?: 'S'
def read_length = meta.read_length ?: 150
def args = task.ext.args ?: "-l ${taxonomic_level} -t ${threshold} -r ${read_length}"
def prefix = task.ext.prefix ?: "${meta.id}"
def bracken_version = '2.6.2'
bracken_report = "${prefix}_${taxonomic_level}.tsv"
"""
bracken \\
${args} \\
-d '${database}' \\
-i '${kraken_report}' \\
-o '${bracken_report}'
cat <<-END_VERSIONS > versions.yml
"${task.process}":
bracken: ${bracken_version}
END_VERSIONS
"""
}
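The Bracken arguments are derived per sample from the meta map, falling back to `-l S -t 10 -r 150`, and `task.ext.args` replaces the whole string when set. A sketch of a per-sample override, as exercised by the custom-args tests below (the report file name is illustrative):

    input = [ [ id:'sample1', threshold:0, taxonomic_level:'G', read_length:100 ], // meta map
              file('sample1.kraken2.report.txt') ]
    BRACKEN_BRACKEN ( input, ch_db )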

modules/bracken/bracken/meta.yml

@ -0,0 +1,45 @@
name: bracken_bracken
description: Re-estimate taxonomic abundance of metagenomic samples analyzed by Kraken.
keywords:
- bracken
- metagenomics
- abundance
tools:
- bracken:
description: Bracken (Bayesian Reestimation of Abundance with KrakEN) is a highly accurate statistical method that computes the abundance of species in DNA sequences from a metagenomics sample.
homepage: https://ccb.jhu.edu/software/bracken/
documentation: https://ccb.jhu.edu/software/bracken/index.shtml?t=manual
tool_dev_url: https://github.com/jenniferlu717/Bracken
doi: "10.7717/peerj-cs.104"
licence: ["GPL v3"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- kraken_report:
type: file
description: TSV file with six columns coming from kraken2 output
pattern: "*.{tsv}"
- database:
type: file
description: Directory containing the kraken2/Bracken files for analysis
pattern: "*"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- reports:
type: file
description: TSV output report of the re-estimated abundances
pattern: "*.{tsv}"
authors:
- "@Midnighter"

modules/star/align/main.nf

@ -29,6 +29,7 @@ process STAR_ALIGN {
tuple val(meta), path('*fastq.gz') , optional:true, emit: fastq
tuple val(meta), path('*.tab') , optional:true, emit: tab
tuple val(meta), path('*.out.junction') , optional:true, emit: junction
tuple val(meta), path('*.out.sam') , optional:true, emit: sam
when:
task.ext.when == null || task.ext.when

modules/svdb/merge/main.nf

@ -2,10 +2,10 @@ process SVDB_MERGE {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::svdb=2.5.0" : null)
conda (params.enable_conda ? "bioconda::svdb=2.5.2" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/svdb:2.5.0--py39hcbe4a3b_0':
'quay.io/biocontainers/svdb:2.5.0--py39hcbe4a3b_0' }"
'https://depot.galaxyproject.org/singularity/svdb:2.5.2--py39h5371cbf_0':
'quay.io/biocontainers/svdb:2.5.2--py39h5371cbf_0' }"
input:
tuple val(meta), path(vcfs)

modules/svdb/query/main.nf

@ -2,36 +2,73 @@ process SVDB_QUERY {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::svdb=2.5.0" : null)
conda (params.enable_conda ? "bioconda::svdb=2.5.2" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/svdb:2.5.0--py39hcbe4a3b_0':
'quay.io/biocontainers/svdb:2.5.0--py39hcbe4a3b_0' }"
'https://depot.galaxyproject.org/singularity/svdb:2.5.2--py39h5371cbf_0':
'quay.io/biocontainers/svdb:2.5.2--py39h5371cbf_0' }"
input:
tuple val(meta), path(vcf)
path (vcf_db)
val(in_occs)
val(in_frqs)
val(out_occs)
val(out_frqs)
path (vcf_dbs)
output:
tuple val(meta), path("*_ann_svdbq.vcf"), emit: vcf
tuple val(meta), path("*_query.vcf"), emit: vcf
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def in_occ = ""
def in_frq = ""
def out_occ = ""
def out_frq = ""
if (in_occs) {
in_occ = "--in_occ ${in_occs.join(',')}"
}
if (in_frqs) {
in_frq = "--in_frq ${in_frqs.join(',')}"
}
if (out_occs) {
out_occ = "--out_occ ${out_occs.join(',')}"
}
if (out_frqs) {
out_frq = "--out_frq ${out_frqs.join(',')}"
}
"""
svdb \\
--query \\
$in_occ \\
$in_frq \\
$out_occ \\
$out_frq \\
$args \\
--db $vcf_db \\
--db ${vcf_dbs.join(',')} \\
--query_vcf $vcf \\
>${prefix}_ann_svdbq.vcf
--prefix ${prefix}
cat <<-END_VERSIONS > versions.yml
"${task.process}":
svdb: \$( echo \$(svdb) | head -1 | sed 's/usage: SVDB-\\([0-9]\\.[0-9]\\.[0-9]\\).*/\\1/' )
END_VERSIONS
"""
stub:
def prefix = task.ext.prefix ?: "${meta.id}"
"""
touch ${prefix}_query.vcf
cat <<-END_VERSIONS > versions.yml
"${task.process}":
svdb: \$( echo \$(svdb) | head -1 | sed 's/usage: SVDB-\\([0-9]\\.[0-9]\\.[0-9]\\).*/\\1/' )
END_VERSIONS
"""
}
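The annotation tags are now passed as explicit value inputs rather than via `ext.args`; a sketch of the updated call, with one tag per database in each list (tag values mirror the tests below):

    SVDB_QUERY (
        ch_vcf,           // tuple val(meta), path(vcf)
        ['AC'],           // in_occs: allele count tag read from each database
        ['AF'],           // in_frqs: allele frequency tag read from each database
        ['gnomad_svAC'],  // out_occs: allele count tag written to the output
        ['gnomad_svAF'],  // out_frqs: allele frequency tag written to the output
        ch_vcf_dbs        // one or more database VCFs, joined with ',' for --db
    )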

modules/svdb/query/meta.yml

@ -15,6 +15,12 @@ input:
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- in_occs:
type: list
description: A list of allele count tags
- in_frqs:
type: list
description: A list of allele frequency tags
- vcf:
type: file
description: Query VCF file
@ -34,10 +40,16 @@ output:
type: file
description: File containing software versions
pattern: "versions.yml"
- out_occs:
type: list
description: A list of allele count tags
- out_frqs:
type: list
description: A list of allele frequency tags
- vcf:
type: file
description: Annotated output VCF file
pattern: "*_ann_svdbq.vcf"
pattern: "*_query.vcf"
authors:
- "@ramprasadn"

modules/untar/main.nf

@ -8,19 +8,19 @@ process UNTAR {
'biocontainers/biocontainers:v1.2.0_cv1' }"
input:
path archive
tuple val(meta), path(archive)
output:
path "$untar" , emit: untar
path "versions.yml", emit: versions
tuple val(meta), path("$untar"), emit: untar
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def args = task.ext.args ?: ''
def args2 = task.ext.args2 ?: ''
untar = archive.toString() - '.tar.gz'
untar = archive.toString() - '.tar.gz'
"""
tar \\
-xzvf \\

modules/untar/meta.yml

@ -10,11 +10,21 @@ tools:
documentation: https://www.gnu.org/software/tar/manual/
licence: ["GPL-3.0-or-later"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- archive:
type: file
description: Archive file to be untarred
pattern: "*.tar.gz"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- untar:
type: file
description: Directory containing the contents of the unpacked archive
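Because UNTAR (and UNZIP below) now accept and emit `tuple val(meta), path(...)`, every call site in this commit wraps a bare archive in a meta tuple and unwraps the path again wherever a plain file channel is still expected:

    UNTAR ( [ [:], archive ] )               // wrap: empty meta map plus the archive ([] is also used in these tests)
    ch_dir = UNTAR.out.untar.map{ it[1] }    // unwrap: keep only the extracted path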

modules/unzip/main.nf

@ -8,11 +8,11 @@ process UNZIP {
'quay.io/biocontainers/p7zip:15.09--h2d50403_4' }"
input:
path archive
tuple val(meta), path(archive)
output:
path "${archive.baseName}/", emit: unzipped_archive
path "versions.yml" , emit: versions
tuple val(meta), path("${archive.baseName}/"), emit: unzipped_archive
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when

modules/unzip/meta.yml

@ -12,12 +12,22 @@ tools:
licence: ["LGPL-2.1-or-later"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- archive:
type: file
description: ZIP file
pattern: "*.zip"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- unzipped_archive:
type: directory
description: Directory contents of the unzipped archive

tests/config/pytest_modules.yml

@ -14,6 +14,10 @@ adapterremoval:
- modules/adapterremoval/**
- tests/modules/adapterremoval/**
adapterremovalfixprefix:
- modules/adapterremovalfixprefix/**
- tests/modules/adapterremovalfixprefix/**
agrvate:
- modules/agrvate/**
- tests/modules/agrvate/**
@ -309,6 +313,10 @@ bowtie2/build:
- modules/bowtie2/build/**
- tests/modules/bowtie2/build_test/**
bracken/bracken:
- modules/bracken/bracken/**
- tests/modules/bracken/bracken/**
bwa/aln:
- modules/bwa/aln/**
- tests/modules/bwa/aln/**

tests/config/test_data.config

@ -25,6 +25,9 @@ params {
kraken2 = "${test_data_dir}/genomics/sarscov2/genome/db/kraken2"
kraken2_tar_gz = "${test_data_dir}/genomics/sarscov2/genome/db/kraken2.tar.gz"
kraken2_bracken = "${test_data_dir}/genomics/sarscov2/genome/db/kraken2_bracken"
kraken2_bracken_tar_gz = "${test_data_dir}/genomics/sarscov2/genome/db/kraken2_bracken.tar.gz"
ncbi_taxmap_zip = "${test_data_dir}/genomics/sarscov2/genome/db/maltextract/ncbi_taxmap.zip"
taxon_list_txt = "${test_data_dir}/genomics/sarscov2/genome/db/maltextract/taxon_list.txt"
@ -140,6 +143,7 @@ params {
syntheticvcf_short_vcf_gz_tbi = "${test_data_dir}/genomics/homo_sapiens/genome/vcf/syntheticvcf_short.vcf.gz.tbi"
syntheticvcf_short_score = "${test_data_dir}/genomics/homo_sapiens/genome/vcf/syntheticvcf_short.score"
gnomad_r2_1_1_sv_vcf_gz = "${test_data_dir}/genomics/homo_sapiens/genome/vcf/gnomAD.r2.1.1-sv.vcf.gz"
gnomad2_r2_1_1_sv_vcf_gz = "${test_data_dir}/genomics/homo_sapiens/genome/vcf/gnomAD2.r2.1.1-sv.vcf.gz"
hapmap_3_3_hg38_21_vcf_gz = "${test_data_dir}/genomics/homo_sapiens/genome/chr21/germlineresources/hapmap_3.3.hg38.vcf.gz"
hapmap_3_3_hg38_21_vcf_gz_tbi = "${test_data_dir}/genomics/homo_sapiens/genome/chr21/germlineresources/hapmap_3.3.hg38.vcf.gz.tbi"

tests/modules/adapterremovalfixprefix/main.nf

@ -0,0 +1,19 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { ADAPTERREMOVAL } from '../../../modules/adapterremoval/main.nf'
include { ADAPTERREMOVALFIXPREFIX } from '../../../modules/adapterremovalfixprefix/main.nf'
workflow test_adapterremovalfixprefix {
input = [
[ id:'test', single_end:false ], // meta map
[ file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_2_fastq_gz'], checkIfExists: true)
]
]
ADAPTERREMOVAL ( input, [] )
ADAPTERREMOVALFIXPREFIX ( ADAPTERREMOVAL.out.collapsed )
}

tests/modules/adapterremovalfixprefix/nextflow.config

@ -0,0 +1,9 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
withName: ADAPTERREMOVAL {
ext.args = "--collapse"
}
}

tests/modules/adapterremovalfixprefix/test.yml

@ -0,0 +1,9 @@
- name: adapterremovalfixprefix test_adapterremovalfixprefix
command: nextflow run tests/modules/adapterremovalfixprefix -entry test_adapterremovalfixprefix -c tests/config/nextflow.config
tags:
- adapterremovalfixprefix
files:
- path: output/adapterremovalfixprefix/test.fq.gz
md5sum: ff956de3532599a56c3efe5369f0953f
- path: output/adapterremovalfixprefix/versions.yml
md5sum: 983cb58079bf015c1d489a7e48261746

tests/modules/amps/main.nf

@ -15,18 +15,21 @@ workflow test_amps {
fastas = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
gff = []
seq_type = "DNA"
map_db = file("https://software-ab.informatik.uni-tuebingen.de/download/megan6/megan-nucl-Jan2021.db.zip", checkIfExists: true)
input = file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true)
map_db = [ [], file("https://software-ab.informatik.uni-tuebingen.de/download/megan6/megan-nucl-Jan2021.db.zip", checkIfExists: true) ]
input = [
[ id:'test', single_end:false ], // meta map
file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true)
]
mode = "BlastN"
taxon_list = file(params.test_data['sarscov2']['genome']['taxon_list_txt'], checkIfExists: true)
ncbi_dir = file(params.test_data['sarscov2']['genome']['ncbi_taxmap_zip'], checkIfExists: true)
filter = "def_anc"
ncbi_dir = [ [], file(params.test_data['sarscov2']['genome']['ncbi_taxmap_zip'], checkIfExists: true) ]
UNZIP_MALT ( map_db )
UNZIP_MALTEXTRACT ( ncbi_dir )
MALT_BUILD ( fastas, seq_type, gff, UNZIP_MALT.out.unzipped_archive )
MALT_BUILD ( fastas, seq_type, gff, UNZIP_MALT.out.unzipped_archive.map{ it[1] } )
MALT_RUN ( input, mode, MALT_BUILD.out.index )
MALTEXTRACT ( MALT_RUN.out.rma6, taxon_list, UNZIP_MALTEXTRACT.out.unzipped_archive)
ch_input_to_maltextract = MALT_RUN.out.rma6.map{ it[1] }
MALTEXTRACT ( ch_input_to_maltextract, taxon_list, UNZIP_MALTEXTRACT.out.unzipped_archive.map{ it[1] })
AMPS ( MALTEXTRACT.out.results, taxon_list, filter )
}

tests/modules/artic/minion/main.nf

@ -11,12 +11,12 @@ workflow test_artic_minion {
[ id:'test', single_end:false ], // meta map
file(params.test_data['sarscov2']['nanopore']['test_fastq_gz'], checkIfExists: true)
]
fast5_tar = file(params.test_data['sarscov2']['nanopore']['fast5_tar_gz'], checkIfExists: true)
fast5_tar = [ [], file(params.test_data['sarscov2']['nanopore']['fast5_tar_gz'], checkIfExists: true) ]
sequencing_summary = file(params.test_data['sarscov2']['nanopore']['test_sequencing_summary'], checkIfExists: true)
fasta = file('https://github.com/artic-network/primer-schemes/raw/master/nCoV-2019/V3/nCoV-2019.reference.fasta', checkIfExists: true)
bed = file('https://github.com/artic-network/primer-schemes/raw/master/nCoV-2019/V3/nCoV-2019.primer.bed', checkIfExists: true)
fast5_dir = UNTAR ( fast5_tar ).untar
fast5_dir = UNTAR ( fast5_tar ).untar.map{ it[1] }
ARTIC_MINION ( input, fast5_dir, sequencing_summary, fasta, bed, [], '', 'nCoV-2019', '3')
}

tests/modules/bracken/bracken/main.nf

@ -0,0 +1,57 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { UNTAR } from '../../../../modules/untar/main.nf'
include { KRAKEN2_KRAKEN2 } from '../../../../modules/kraken2/kraken2/main.nf'
include { BRACKEN_BRACKEN } from '../../../../modules/bracken/bracken/main.nf'
workflow test_bracken_bracken_single_end_default_args {
input = [ [ id:'test', single_end:true ], // meta map
[ file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true) ]
]
db = file(params.test_data['sarscov2']['genome']['kraken2_bracken_tar_gz'], checkIfExists: true)
ch_db = UNTAR ( [[:], db] ).untar
.map { it[1] }
KRAKEN2_KRAKEN2 ( input, ch_db )
BRACKEN_BRACKEN ( KRAKEN2_KRAKEN2.out.txt, ch_db )
}
workflow test_bracken_bracken_single_end_custom_args {
input = [ [ id:'test', single_end:true, threshold:0, taxonomic_level:'G', read_length:100 ], // meta map
[ file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true) ]
]
db = file(params.test_data['sarscov2']['genome']['kraken2_bracken_tar_gz'], checkIfExists: true)
ch_db = UNTAR ( [[:], db] ).untar
.map { it[1] }
KRAKEN2_KRAKEN2 ( input, ch_db )
BRACKEN_BRACKEN ( KRAKEN2_KRAKEN2.out.txt, ch_db )
}
workflow test_bracken_bracken_paired_end_default_args {
input = [ [ id:'test', single_end:false ], // meta map
[ file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_2_fastq_gz'], checkIfExists: true) ]
]
db = file(params.test_data['sarscov2']['genome']['kraken2_bracken_tar_gz'], checkIfExists: true)
ch_db = UNTAR ( [[:], db] ).untar
.map { it[1] }
KRAKEN2_KRAKEN2 ( input, ch_db )
BRACKEN_BRACKEN ( KRAKEN2_KRAKEN2.out.txt, ch_db )
}
workflow test_bracken_bracken_paired_end_custom_args {
input = [ [ id:'test', single_end:false, threshold:0, taxonomic_level:'G', read_length:100 ], // meta map
[ file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_2_fastq_gz'], checkIfExists: true) ]
]
db = file(params.test_data['sarscov2']['genome']['kraken2_bracken_tar_gz'], checkIfExists: true)
ch_db = UNTAR ( [[:], db] ).untar
.map { it[1] }
KRAKEN2_KRAKEN2 ( input, ch_db )
BRACKEN_BRACKEN ( KRAKEN2_KRAKEN2.out.txt, ch_db )
}

tests/modules/bracken/bracken/nextflow.config

@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}
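The publishDir closure names the output directory after the first underscore-separated token of the task's simple process name; a quick Groovy check of that expression (the qualified name is illustrative):

    def name = 'TEST_BRACKEN:BRACKEN_BRACKEN'  // stand-in for task.process
    assert name.tokenize(':')[-1].tokenize('_')[0].toLowerCase() == 'bracken'

which is why the Bracken results land under output/bracken/ in the test.yml that follows.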

tests/modules/bracken/bracken/test.yml

@ -0,0 +1,43 @@
- name: bracken bracken test_bracken_bracken_single_end_default_args
command: nextflow run tests/modules/bracken/bracken -entry test_bracken_bracken_single_end_default_args -c tests/config/nextflow.config
tags:
- bracken/bracken
- bracken
files:
- path: output/bracken/test_S.tsv
md5sum: 4a21ae14ff8a0311d55f139af5247838
- path: output/bracken/versions.yml
md5sum: ab8b1550f84a99bae80f050fe718abd0
- name: bracken bracken test_bracken_bracken_single_end_custom_args
command: nextflow run tests/modules/bracken/bracken -entry test_bracken_bracken_single_end_custom_args -c tests/config/nextflow.config
tags:
- bracken/bracken
- bracken
files:
- path: output/bracken/test_G.tsv
md5sum: f609b09d6edb5ebc1ea1435d1dd46cde
- path: output/bracken/versions.yml
md5sum: af87e8d4c42fbcb0469ab13912b8a9bd
- name: bracken bracken test_bracken_bracken_paired_end_default_args
command: nextflow run tests/modules/bracken/bracken -entry test_bracken_bracken_paired_end_default_args -c tests/config/nextflow.config
tags:
- bracken/bracken
- bracken
files:
- path: output/bracken/test_S.tsv
md5sum: 4a21ae14ff8a0311d55f139af5247838
- path: output/bracken/versions.yml
md5sum: 4602111eb25bd19a7f9d725acc5921f6
- name: bracken bracken test_bracken_bracken_paired_end_custom_args
command: nextflow run tests/modules/bracken/bracken -entry test_bracken_bracken_paired_end_custom_args -c tests/config/nextflow.config
tags:
- bracken/bracken
- bracken
files:
- path: output/bracken/test_G.tsv
md5sum: f609b09d6edb5ebc1ea1435d1dd46cde
- path: output/bracken/versions.yml
md5sum: d4618b01df5ac09cc366fe2ae7c13f06

tests/modules/cellranger/mkfastq/main.nf

@ -8,19 +8,19 @@ include { CELLRANGER_MKFASTQ } from '../../../../modules/cellranger/mkfastq/main
workflow test_cellranger_mkfastq_simple {
simple_csv = file("https://cf.10xgenomics.com/supp/cell-exp/cellranger-tiny-bcl-simple-1.2.0.csv", checkIfExists: true)
tiny_bcl = file("https://cf.10xgenomics.com/supp/cell-exp/cellranger-tiny-bcl-1.2.0.tar.gz", checkIfExists: true)
tiny_bcl = [ [], file("https://cf.10xgenomics.com/supp/cell-exp/cellranger-tiny-bcl-1.2.0.tar.gz", checkIfExists: true) ]
UNTAR ( tiny_bcl )
CELLRANGER_MKFASTQ ( UNTAR.out.untar, simple_csv)
CELLRANGER_MKFASTQ ( UNTAR.out.untar.map{ it[1] }, simple_csv)
}
workflow test_cellranger_mkfastq_illumina {
samplesheet_csv = file("https://cf.10xgenomics.com/supp/cell-exp/cellranger-tiny-bcl-samplesheet-1.2.0.csv", checkIfExists: true)
tiny_bcl = file("https://cf.10xgenomics.com/supp/cell-exp/cellranger-tiny-bcl-1.2.0.tar.gz", checkIfExists: true)
tiny_bcl = [ [], file("https://cf.10xgenomics.com/supp/cell-exp/cellranger-tiny-bcl-1.2.0.tar.gz", checkIfExists: true) ]
UNTAR ( tiny_bcl )
CELLRANGER_MKFASTQ ( UNTAR.out.untar, samplesheet_csv)
CELLRANGER_MKFASTQ ( UNTAR.out.untar.map{ it[1] }, samplesheet_csv)
}

tests/modules/controlfreec/main.nf

@ -19,7 +19,7 @@ workflow test_controlfreec {
dbsnp = file(params.test_data['homo_sapiens']['genome']['dbsnp_138_hg38_21_vcf_gz'], checkIfExists: true)
dbsnp_tbi = file(params.test_data['homo_sapiens']['genome']['dbsnp_138_hg38_21_vcf_gz_tbi'], checkIfExists: true)
chrfiles = file(params.test_data['homo_sapiens']['genome']['genome_21_chromosomes_dir'], checkIfExists: true)
chrfiles = [ [], file(params.test_data['homo_sapiens']['genome']['genome_21_chromosomes_dir'], checkIfExists: true) ]
target_bed = file(params.test_data['homo_sapiens']['genome']['genome_21_multi_interval_bed'], checkIfExists: true)
UNTAR(chrfiles)
@ -29,7 +29,7 @@ workflow test_controlfreec {
[],
dbsnp,
dbsnp_tbi,
UNTAR.out.untar,
UNTAR.out.untar.map{ it[1] },
[],
target_bed,
[]

tests/modules/gatk4/createsomaticpanelofnormals/main.nf

@ -6,12 +6,12 @@ include { UNTAR } from '../../../../modules/untar/main.nf'
include { GATK4_CREATESOMATICPANELOFNORMALS } from '../../../../modules/gatk4/createsomaticpanelofnormals/main.nf'
workflow test_gatk4_createsomaticpanelofnormals {
db = file(params.test_data['homo_sapiens']['illumina']['test_genomicsdb_tar_gz'], checkIfExists: true)
db = [[], file(params.test_data['homo_sapiens']['illumina']['test_genomicsdb_tar_gz'], checkIfExists: true) ]
UNTAR ( db )
input = Channel.of([ id:'test'])
.combine(UNTAR.out.untar)
.combine(UNTAR.out.untar.map{ it[1] })
fasta = file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true)
fastaidx = file(params.test_data['homo_sapiens']['genome']['genome_fasta_fai'], checkIfExists: true)

tests/modules/gatk4/genomicsdbimport/main.nf

@ -22,7 +22,7 @@ workflow test_gatk4_genomicsdbimport_create_genomicsdb {
}
workflow test_gatk4_genomicsdbimport_get_intervalslist {
db = file(params.test_data['homo_sapiens']['illumina']['test_genomicsdb_tar_gz'], checkIfExists: true)
db = [ [], file(params.test_data['homo_sapiens']['illumina']['test_genomicsdb_tar_gz'], checkIfExists: true) ]
UNTAR ( db )
@ -31,7 +31,7 @@ workflow test_gatk4_genomicsdbimport_get_intervalslist {
[] ,
[] ,
[] ])
.combine(UNTAR.out.untar)
.combine(UNTAR.out.untar.map{ it[1] })
run_intlist = true
run_updatewspace = false
@ -41,7 +41,7 @@ workflow test_gatk4_genomicsdbimport_get_intervalslist {
}
workflow test_gatk4_genomicsdbimport_update_genomicsdb {
db = file(params.test_data['homo_sapiens']['illumina']['test_genomicsdb_tar_gz'], checkIfExists: true)
db = [ [], file(params.test_data['homo_sapiens']['illumina']['test_genomicsdb_tar_gz'], checkIfExists: true) ]
UNTAR ( db )
@ -50,7 +50,7 @@ workflow test_gatk4_genomicsdbimport_update_genomicsdb {
file( params.test_data['homo_sapiens']['illumina']['test2_genome_vcf_gz_tbi'] , checkIfExists: true) ,
[] ,
[] ])
.combine(UNTAR.out.untar)
.combine(UNTAR.out.untar.map{ it[1] })
run_intlist = false
run_updatewspace = true

tests/modules/gatk4/genotypegvcfs/main.nf

@ -97,10 +97,10 @@ workflow test_gatk4_genotypegvcfs_gendb_input {
fastaIndex = file(params.test_data['homo_sapiens']['genome']['genome_fasta_fai'], checkIfExists: true)
fastaDict = file(params.test_data['homo_sapiens']['genome']['genome_dict'], checkIfExists: true)
test_genomicsdb = file(params.test_data['homo_sapiens']['illumina']['test_genomicsdb_tar_gz'], checkIfExists: true)
test_genomicsdb = [ [], file(params.test_data['homo_sapiens']['illumina']['test_genomicsdb_tar_gz'], checkIfExists: true) ]
UNTAR ( test_genomicsdb )
gendb = UNTAR.out.untar.collect()
gendb = UNTAR.out.untar.map{ it[1] }.collect()
gendb.add([])
gendb.add([])
@ -119,10 +119,10 @@ workflow test_gatk4_genotypegvcfs_gendb_input_dbsnp {
dbsnp = file(params.test_data['homo_sapiens']['genome']['dbsnp_146_hg38_vcf_gz'], checkIfExists: true)
dbsnpIndex = file(params.test_data['homo_sapiens']['genome']['dbsnp_146_hg38_vcf_gz_tbi'], checkIfExists: true)
test_genomicsdb = file(params.test_data['homo_sapiens']['illumina']['test_genomicsdb_tar_gz'], checkIfExists: true)
test_genomicsdb = [ [], file(params.test_data['homo_sapiens']['illumina']['test_genomicsdb_tar_gz'], checkIfExists: true) ]
UNTAR ( test_genomicsdb )
gendb = UNTAR.out.untar.collect()
gendb = UNTAR.out.untar.map{ it[1] }.collect()
gendb.add([])
gendb.add([])
input = Channel.of([ id:'test' ]).combine(gendb)
@ -137,10 +137,10 @@ workflow test_gatk4_genotypegvcfs_gendb_input_intervals {
fastaIndex = file(params.test_data['homo_sapiens']['genome']['genome_fasta_fai'], checkIfExists: true)
fastaDict = file(params.test_data['homo_sapiens']['genome']['genome_dict'], checkIfExists: true)
test_genomicsdb = file(params.test_data['homo_sapiens']['illumina']['test_genomicsdb_tar_gz'], checkIfExists: true)
test_genomicsdb = [ [], file(params.test_data['homo_sapiens']['illumina']['test_genomicsdb_tar_gz'], checkIfExists: true) ]
UNTAR ( test_genomicsdb )
gendb = UNTAR.out.untar.collect()
gendb = UNTAR.out.untar.map{ it[1] }.collect()
gendb.add([])
gendb.add([file(params.test_data['homo_sapiens']['genome']['genome_bed'], checkIfExists: true)])
input = Channel.of([ id:'test' ]).combine(gendb)
@ -158,10 +158,10 @@ workflow test_gatk4_genotypegvcfs_gendb_input_dbsnp_intervals {
dbsnp = file(params.test_data['homo_sapiens']['genome']['dbsnp_146_hg38_vcf_gz'], checkIfExists: true)
dbsnpIndex = file(params.test_data['homo_sapiens']['genome']['dbsnp_146_hg38_vcf_gz_tbi'], checkIfExists: true)
test_genomicsdb = file(params.test_data['homo_sapiens']['illumina']['test_genomicsdb_tar_gz'], checkIfExists: true)
test_genomicsdb = [ [], file(params.test_data['homo_sapiens']['illumina']['test_genomicsdb_tar_gz'], checkIfExists: true) ]
UNTAR ( test_genomicsdb )
gendb = UNTAR.out.untar.collect()
gendb = UNTAR.out.untar.map{ it[1] }.collect()
gendb.add([])
gendb.add([file(params.test_data['homo_sapiens']['genome']['genome_bed'], checkIfExists: true)])
input = Channel.of([ id:'test' ]).combine(gendb)

tests/modules/kraken2/kraken2/main.nf

@ -9,10 +9,10 @@ workflow test_kraken2_kraken2_single_end {
input = [ [ id:'test', single_end:true ], // meta map
[ file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true) ]
]
db = file(params.test_data['sarscov2']['genome']['kraken2_tar_gz'], checkIfExists: true)
db = [ [], file(params.test_data['sarscov2']['genome']['kraken2_tar_gz'], checkIfExists: true) ]
UNTAR ( db )
KRAKEN2_KRAKEN2 ( input, UNTAR.out.untar )
KRAKEN2_KRAKEN2 ( input, UNTAR.out.untar.map{ it[1] } )
}
workflow test_kraken2_kraken2_paired_end {
@ -20,8 +20,8 @@ workflow test_kraken2_kraken2_paired_end {
[ file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_2_fastq_gz'], checkIfExists: true) ]
]
db = file(params.test_data['sarscov2']['genome']['kraken2_tar_gz'], checkIfExists: true)
db = [ [], file(params.test_data['sarscov2']['genome']['kraken2_tar_gz'], checkIfExists: true) ]
UNTAR ( db )
KRAKEN2_KRAKEN2 ( input, UNTAR.out.untar )
KRAKEN2_KRAKEN2 ( input, UNTAR.out.untar.map{ it[1] } )
}

tests/modules/last/lastal/main.nf

@ -10,10 +10,10 @@ workflow test_last_lastal_with_dummy_param_file {
input = [ [ id:'contigs', single_end:false ], // meta map
file(params.test_data['sarscov2']['illumina']['contigs_fasta'], checkIfExists: true),
[] ]
db = [ file(params.test_data['sarscov2']['genome']['lastdb_tar_gz'], checkIfExists: true) ]
db = [ [], file(params.test_data['sarscov2']['genome']['lastdb_tar_gz'], checkIfExists: true) ]
UNTAR ( db )
LAST_LASTAL ( input, UNTAR.out.untar)
LAST_LASTAL ( input, UNTAR.out.untar.map{ it[1] })
}
workflow test_last_lastal_with_real_param_file {
@ -21,8 +21,8 @@ workflow test_last_lastal_with_real_param_file {
input = [ [ id:'contigs', single_end:false ], // meta map
file(params.test_data['sarscov2']['illumina']['contigs_fasta'], checkIfExists: true),
file(params.test_data['sarscov2']['genome']['contigs_genome_par'], checkIfExists: true) ]
db = [ file(params.test_data['sarscov2']['genome']['lastdb_tar_gz'], checkIfExists: true) ]
db = [ [], file(params.test_data['sarscov2']['genome']['lastdb_tar_gz'], checkIfExists: true) ]
UNTAR ( db )
LAST_LASTAL ( input, UNTAR.out.untar)
LAST_LASTAL ( input, UNTAR.out.untar.map{ it[1] })
}

tests/modules/last/train/main.nf

@ -7,9 +7,9 @@ include { LAST_TRAIN } from '../../../../modules/last/train/main.nf'
workflow test_last_train {
db = [ file(params.test_data['sarscov2']['genome']['lastdb_tar_gz'], checkIfExists: true) ]
db = [ [], file(params.test_data['sarscov2']['genome']['lastdb_tar_gz'], checkIfExists: true) ]
input = [ [ id:'contigs' ], // meta map
file(params.test_data['sarscov2']['illumina']['contigs_fasta'], checkIfExists: true) ]
UNTAR ( db )
LAST_TRAIN ( input, UNTAR.out.untar )
LAST_TRAIN ( input, UNTAR.out.untar.map{ it[1] } )
}

tests/modules/malt/build/main.nf

@ -9,18 +9,18 @@ workflow test_malt_build {
fastas = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
seq_type = "DNA"
gff = []
map_db = file("https://software-ab.informatik.uni-tuebingen.de/download/megan6/megan-nucl-Jan2021.db.zip", checkIfExists: true)
map_db = [ [], file("https://software-ab.informatik.uni-tuebingen.de/download/megan6/megan-nucl-Jan2021.db.zip", checkIfExists: true) ]
UNZIP ( map_db )
MALT_BUILD ( fastas, seq_type, gff, UNZIP.out.unzipped_archive )
MALT_BUILD ( fastas, seq_type, gff, UNZIP.out.unzipped_archive.map{ it[1] } )
}
workflow test_malt_build_gff {
fastas = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
seq_type = "DNA"
gff = file(params.test_data['sarscov2']['genome']['genome_gff3'], checkIfExists: true)
map_db = file("https://software-ab.informatik.uni-tuebingen.de/download/megan6/megan-nucl-Jan2021.db.zip", checkIfExists: true)
map_db = [ [], file("https://software-ab.informatik.uni-tuebingen.de/download/megan6/megan-nucl-Jan2021.db.zip", checkIfExists: true) ]
UNZIP ( map_db )
MALT_BUILD ( fastas, seq_type, gff, UNZIP.out.unzipped_archive )
MALT_BUILD ( fastas, seq_type, gff, UNZIP.out.unzipped_archive.map{ it[1] } )
}

tests/modules/malt/run/main.nf

@ -2,16 +2,16 @@
nextflow.enable.dsl = 2
include { UNZIP } from '../../../../modules/unzip/main.nf'
include { UNZIP } from '../../../../modules/unzip/main.nf'
include { MALT_BUILD } from '../../../../modules/malt/build/main.nf'
include { MALT_RUN } from '../../../../modules/malt/run/main.nf'
include { MALT_RUN } from '../../../../modules/malt/run/main.nf'
workflow test_malt_run {
fastas = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
gff = file(params.test_data['sarscov2']['genome']['genome_gff3'], checkIfExists: true)
seq_type = "DNA"
map_db = file("https://software-ab.informatik.uni-tuebingen.de/download/megan6/megan-nucl-Jan2021.db.zip", checkIfExists: true)
map_db = [ [], file("https://software-ab.informatik.uni-tuebingen.de/download/megan6/megan-nucl-Jan2021.db.zip", checkIfExists: true) ]
input = [
[ id:'test', single_end:false ], // meta map
file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true)
@ -19,7 +19,7 @@ workflow test_malt_run {
mode = "BlastN"
UNZIP ( map_db )
MALT_BUILD ( fastas, seq_type, gff, UNZIP.out.unzipped_archive )
MALT_BUILD ( fastas, seq_type, gff, UNZIP.out.unzipped_archive.map { it[1] } )
MALT_RUN ( input, mode, MALT_BUILD.out.index )
}

tests/modules/maltextract/main.nf

@ -4,24 +4,28 @@ nextflow.enable.dsl = 2
include { UNZIP as UNZIP_MALT } from '../../../modules/unzip/main.nf'
include { UNZIP as UNZIP_MALTEXTRACT } from '../../../modules/unzip/main.nf'
include { MALT_BUILD } from '../../../modules/malt/build/main.nf'
include { MALT_RUN } from '../../../modules/malt/run/main.nf'
include { MALTEXTRACT } from '../../../modules/maltextract/main.nf'
include { MALT_BUILD } from '../../../modules/malt/build/main.nf'
include { MALT_RUN } from '../../../modules/malt/run/main.nf'
include { MALTEXTRACT } from '../../../modules/maltextract/main.nf'
workflow test_maltextract {
fastas = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
gff = []
seq_type = "DNA"
map_db = file("https://software-ab.informatik.uni-tuebingen.de/download/megan6/megan-nucl-Jan2021.db.zip", checkIfExists: true)
input = file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true)
map_db = [ [], file("https://software-ab.informatik.uni-tuebingen.de/download/megan6/megan-nucl-Jan2021.db.zip", checkIfExists: true) ]
input = [
[ id:'test', single_end:false ], // meta map
file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true)
]
mode = "BlastN"
taxon_list = file(params.test_data['sarscov2']['genome']['taxon_list_txt'], checkIfExists: true)
ncbi_dir = file(params.test_data['sarscov2']['genome']['ncbi_taxmap_zip'], checkIfExists: true)
ncbi_dir = [ [], file(params.test_data['sarscov2']['genome']['ncbi_taxmap_zip'], checkIfExists: true) ]
UNZIP_MALT ( map_db )
UNZIP_MALTEXTRACT ( ncbi_dir )
MALT_BUILD ( fastas, seq_type, gff, UNZIP_MALT.out.unzipped_archive )
MALT_BUILD ( fastas, seq_type, gff, UNZIP_MALT.out.unzipped_archive.map{ it[1] } )
MALT_RUN ( input, mode, MALT_BUILD.out.index )
MALTEXTRACT ( MALT_RUN.out.rma6, taxon_list, UNZIP_MALTEXTRACT.out.unzipped_archive)
ch_input_to_maltextract = MALT_RUN.out.rma6.map{ it[1] }
MALTEXTRACT ( ch_input_to_maltextract, taxon_list, UNZIP_MALTEXTRACT.out.unzipped_archive.map{ it[1] })
}

tests/modules/maltextract/test.yml

@ -3,8 +3,6 @@
tags:
- maltextract
files:
- path: output/maltextract/results/error.txt
md5sum: d41d8cd98f00b204e9800998ecf8427e
- path: output/maltextract/results/error.txt
- path: output/maltextract/results/log.txt
contains:

tests/modules/metaphlan3/main.nf

@ -12,10 +12,10 @@ workflow test_metaphlan3_single_end {
[ file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true) ]
]
db = channel.fromPath('https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/delete_me/metaphlan_database.tar.gz', type: 'dir', checkIfExists: true)
db = [ [], file('https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/delete_me/metaphlan_database.tar.gz', checkIfExists: true) ]
UNTAR ( db )
METAPHLAN3 ( input, UNTAR.out.untar )
METAPHLAN3 ( input, UNTAR.out.untar.map{ it[1] } )
}
workflow test_metaphlan3_paired_end {
@ -25,11 +25,10 @@ workflow test_metaphlan3_paired_end {
file(params.test_data['sarscov2']['illumina']['test_2_fastq_gz'], checkIfExists: true) ]
]
db = channel.fromPath('https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/delete_me/metaphlan_database.tar.gz', type: 'dir', checkIfExists: true)
db = [ [], file('https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/delete_me/metaphlan_database.tar.gz', checkIfExists: true) ]
UNTAR ( db )
METAPHLAN3 ( input, UNTAR.out.untar )
METAPHLAN3 ( input, UNTAR.out.untar.map{ it[1] } )
}
workflow test_metaphlan3_sam {
@ -38,12 +37,11 @@ workflow test_metaphlan3_sam {
[ file(params.test_data['sarscov2']['illumina']['test_single_end_bam'], checkIfExists: true) ]
]
db = channel.fromPath('https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/delete_me/metaphlan_database.tar.gz', type: 'dir', checkIfExists: true)
db = [ [], file('https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/delete_me/metaphlan_database.tar.gz', checkIfExists: true) ]
UNTAR ( db )
SAMTOOLS_VIEW ( input, [] )
METAPHLAN3 ( SAMTOOLS_VIEW.out.bam, UNTAR.out.untar )
METAPHLAN3 ( SAMTOOLS_VIEW.out.bam, UNTAR.out.untar.map{ it[1] } )
}
workflow test_metaphlan3_fasta {
@ -52,8 +50,8 @@ workflow test_metaphlan3_fasta {
[ file(params.test_data['sarscov2']['genome']['transcriptome_fasta'], checkIfExists: true) ]
]
db = channel.fromPath('https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/delete_me/metaphlan_database.tar.gz', type: 'dir', checkIfExists: true)
db = [ [], file('https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/delete_me/metaphlan_database.tar.gz', checkIfExists: true) ]
UNTAR ( db )
METAPHLAN3 ( input, UNTAR.out.untar )
METAPHLAN3 ( input, UNTAR.out.untar.map{ it[1] } )
}

tests/modules/sratools/fasterqdump/main.nf

@ -7,22 +7,22 @@ include { SRATOOLS_FASTERQDUMP } from '../../../../modules/sratools/fasterqdump/
workflow test_sratools_fasterqdump_single_end {
archive = file(params.test_data['sarscov2']['illumina']['SRR13255544_tar_gz'], checkIfExists: true)
archive = [ [], file(params.test_data['sarscov2']['illumina']['SRR13255544_tar_gz'], checkIfExists: true) ]
UNTAR ( archive )
def input = Channel.of([ id:'test_single_end', single_end:true ])
.combine(UNTAR.out.untar)
.combine(UNTAR.out.untar.map{ it[1] })
SRATOOLS_FASTERQDUMP ( input )
}
workflow test_sratools_fasterqdump_paired_end {
archive = file(params.test_data['sarscov2']['illumina']['SRR11140744_tar_gz'], checkIfExists: true)
archive = [ [], file(params.test_data['sarscov2']['illumina']['SRR11140744_tar_gz'], checkIfExists: true) ]
UNTAR ( archive )
def input = Channel.of([ id:'test_paired_end', single_end:false ])
.combine(UNTAR.out.untar)
.combine(UNTAR.out.untar.map{ it[1] })
SRATOOLS_FASTERQDUMP ( input )
}

tests/modules/svdb/query/main.nf

@ -14,5 +14,29 @@ workflow test_svdb_query {
file(params.test_data['homo_sapiens']['genome']['gnomad_r2_1_1_sv_vcf_gz'], checkIfExists: true)
]
SVDB_QUERY ( input, vcf_db )
in_occs = ['AC']
in_frqs = ['AF']
out_occs = ['gnomad_svAC']
out_frqs = ['gnomad_svAF']
SVDB_QUERY ( input, in_occs, in_frqs, out_occs, out_frqs, vcf_db )
}
workflow test_svdb_query_multiple {
input = [ [ id:'test' ], // meta map
[ file(params.test_data['homo_sapiens']['illumina']['test_sv_vcf'], checkIfExists: true) ]
]
vcf_db = [
file(params.test_data['homo_sapiens']['genome']['gnomad_r2_1_1_sv_vcf_gz'], checkIfExists: true),
file(params.test_data['homo_sapiens']['genome']['gnomad2_r2_1_1_sv_vcf_gz'], checkIfExists: true)
]
in_occs = ['AC','AC']
in_frqs = ['AF','AF']
out_occs = ['gnomad_svAC','gnomad_svAC']
out_frqs = ['gnomad_svAF','gnomad_svAF']
SVDB_QUERY ( input, in_occs, in_frqs, out_occs, out_frqs, vcf_db )
}

tests/modules/svdb/query/nextflow.config

@ -2,8 +2,4 @@ process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
withName: SVDB_QUERY {
ext.args = '--in_occ AC --out_occ gnomad_svAC --in_frq AF --out_frq gnomad_svAF'
}
}

tests/modules/svdb/query/test.yml

@ -4,4 +4,12 @@
- svdb
- svdb/query
files:
- path: output/svdb/test_ann_svdbq.vcf
- path: output/svdb/test_query.vcf
- name: svdb query multiple
command: nextflow run ./tests/modules/svdb/query -entry test_svdb_query_multiple -c ./tests/config/nextflow.config -c ./tests/modules/svdb/query/nextflow.config
tags:
- svdb
- svdb/query
files:
- path: output/svdb/test_query.vcf

tests/modules/untar/main.nf

@ -5,7 +5,10 @@ nextflow.enable.dsl = 2
include { UNTAR } from '../../../modules/untar/main.nf'
workflow test_untar {
input = file(params.test_data['sarscov2']['genome']['kraken2_tar_gz'], checkIfExists: true)
input = [
[],
file(params.test_data['sarscov2']['genome']['kraken2_tar_gz'], checkIfExists: true)
]
UNTAR ( input )
}

tests/modules/unzip/main.nf

@ -6,7 +6,10 @@ include { UNZIP } from '../../../modules/unzip/main.nf'
workflow test_unzip {
archive = file(params.test_data['sarscov2']['genome']['ncbi_taxmap_zip'], checkIfExists: true)
archive = [
[],
file(params.test_data['sarscov2']['genome']['ncbi_taxmap_zip'], checkIfExists: true)
]
UNZIP ( archive )
}

tests/modules/vcfanno/main.nf

@ -6,36 +6,36 @@ include { UNTAR } from '../../../modules/untar/main.nf'
include { VCFANNO } from '../../../modules/vcfanno/main.nf'
workflow test_vcfanno {
input = [
input = [
[ id:'test_compressed', single_end:false ], // meta map
file(params.test_data['sarscov2']['illumina']['test_vcf_gz'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_vcf_gz_tbi'], checkIfExists: true)
]
input_2 = [ [ id:'test_compressed', single_end:false ], // meta map
input_2 = [ [ id:'test_compressed', single_end:false ], // meta map
[] ]
toml = file(params.test_data['homo_sapiens']['genome']['vcfanno_toml'], checkIfExists: true)
resource_dir = file(params.test_data['homo_sapiens']['genome']['vcfanno_tar_gz'], checkIfExists: true)
resource_dir = [[], file(params.test_data['homo_sapiens']['genome']['vcfanno_tar_gz'], checkIfExists: true) ]
UNTAR ( resource_dir )
VCFANNO ( input, input_2, toml, UNTAR.out.untar )
VCFANNO ( input, input_2, toml, UNTAR.out.untar.map{ it[1] } )
}
workflow test_vcfanno_uncompressed {
input = [ [ id:'test_uncompressed', single_end:false ], // meta map
[] ,[] ]
input_2 = [
input_2 = [
[ id:'test_uncompressed', single_end:false ], // meta map
file(params.test_data['sarscov2']['illumina']['test_vcf'], checkIfExists: true)
]
toml = file(params.test_data['homo_sapiens']['genome']['vcfanno_toml'], checkIfExists: true)
resource_dir = file(params.test_data['homo_sapiens']['genome']['vcfanno_tar_gz'], checkIfExists: true)
resource_dir = [[], file(params.test_data['homo_sapiens']['genome']['vcfanno_tar_gz'], checkIfExists: true) ]
UNTAR ( resource_dir )
VCFANNO ( input, input_2, toml, UNTAR.out.untar )
}
VCFANNO ( input, input_2, toml, UNTAR.out.untar.map{ it[1] } )
}