Merge branch 'master' into motus_profile

Commit fadb83cf44 by JIANHONG OU, 2022-05-13 09:30:20 -04:00, committed by GitHub
GPG key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
82 changed files with 1723 additions and 390 deletions

View file

@ -2,15 +2,20 @@ process ARRIBA {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::arriba=2.1.0" : null)
conda (params.enable_conda ? "bioconda::arriba=2.2.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/arriba:2.1.0--h3198e80_1' :
'quay.io/biocontainers/arriba:2.1.0--h3198e80_1' }"
'https://depot.galaxyproject.org/singularity/arriba:2.2.1--hecb563c_2' :
'quay.io/biocontainers/arriba:2.2.1--hecb563c_2' }"
input:
tuple val(meta), path(bam)
path fasta
path gtf
path blacklist
path known_fusions
path structural_variants
path tags
path protein_domains
output:
tuple val(meta), path("*.fusions.tsv") , emit: fusions
@ -23,7 +28,12 @@ process ARRIBA {
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def blacklist = (args.contains('-b')) ? '' : '-f blacklist'
def blacklist = blacklist ? "-b $blacklist" : "-f blacklist"
def known_fusions = known_fusions ? "-k $known_fusions" : ""
def structural_variants = structural_variants ? "-d $structural_variants" : ""
def tags = tags ? "-t $tags" : ""
def protein_domains = protein_domains ? "-p $protein_domains" : ""
"""
arriba \\
-x $bam \\
@ -32,6 +42,10 @@ process ARRIBA {
-o ${prefix}.fusions.tsv \\
-O ${prefix}.fusions.discarded.tsv \\
$blacklist \\
$known_fusions \\
$structural_variants \\
$tags \\
$protein_domains \\
$args
cat <<-END_VERSIONS > versions.yml
@ -39,4 +53,14 @@ process ARRIBA {
arriba: \$(arriba -h | grep 'Version:' 2>&1 | sed 's/Version:\s//')
END_VERSIONS
"""
stub:
def prefix = task.ext.prefix ?: "${meta.id}"
"""
echo stub > ${prefix}.fusions.tsv
echo stub > ${prefix}.fusions.discarded.tsv
echo "${task.process}:" > versions.yml
echo ' arriba: 2.2.1' >> versions.yml
"""
}
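For reference, a minimal sketch of how the extended module signature can be invoked, passing empty lists for optional inputs that are not provided (the include path, channel contents and file names below are illustrative, not part of this commit; the test workflow further down calls the module the same way):
include { ARRIBA } from './modules/arriba/main'
workflow {
    // [ meta, bam ] pairs, e.g. STAR output
    bam_ch    = Channel.of([ [ id:'sample1' ], file('sample1.Aligned.out.bam') ])
    fasta     = file('genome.fasta')
    gtf       = file('genome.gtf')
    blacklist = file('arriba_blacklist.tsv')
    // known_fusions, structural_variants, tags and protein_domains are optional;
    // empty lists keep the corresponding flags out of the command line.
    ARRIBA ( bam_ch, fasta, gtf, blacklist, [], [], [], [] )
}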

View file

@ -30,6 +30,26 @@ input:
type: file
description: Annotation GTF file
pattern: "*.{gtf}"
- blacklist:
type: file
description: Blacklist file
pattern: "*.{tsv}"
- known_fusions:
type: file
description: Known fusions file
pattern: "*.{tsv}"
- structural_variants:
type: file
description: Structural variants file
pattern: "*.{tsv}"
- tags:
type: file
description: Tags file
pattern: "*.{tsv}"
- protein_domains:
type: file
description: Protein domains file
pattern: "*.{gff3}"
output:
- meta:
@ -51,4 +71,4 @@ output:
pattern: "*.{fusions.discarded.tsv}"
authors:
- "@praveenraj2018"
- "@praveenraj2018,@rannick"

View file

@ -1,11 +1,11 @@
process BOWTIE2_ALIGN {
tag "$meta.id"
label 'process_high'
label "process_high"
conda (params.enable_conda ? 'bioconda::bowtie2=2.4.4 bioconda::samtools=1.15.1 conda-forge::pigz=2.6' : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/mulled-v2-ac74a7f02cebcfcc07d8e8d1d750af9c83b4d45a:1744f68fe955578c63054b55309e05b41c37a80d-0' :
'quay.io/biocontainers/mulled-v2-ac74a7f02cebcfcc07d8e8d1d750af9c83b4d45a:1744f68fe955578c63054b55309e05b41c37a80d-0' }"
conda (params.enable_conda ? "bioconda::bowtie2=2.4.4 bioconda::samtools=1.15.1 conda-forge::pigz=2.6" : null)
container "${ workflow.containerEngine == "singularity" && !task.ext.singularity_pull_docker_container ?
"https://depot.galaxyproject.org/singularity/mulled-v2-ac74a7f02cebcfcc07d8e8d1d750af9c83b4d45a:1744f68fe955578c63054b55309e05b41c37a80d-0" :
"quay.io/biocontainers/mulled-v2-ac74a7f02cebcfcc07d8e8d1d750af9c83b4d45a:1744f68fe955578c63054b55309e05b41c37a80d-0" }"
input:
tuple val(meta), path(reads)
@ -13,69 +13,59 @@ process BOWTIE2_ALIGN {
val save_unaligned
output:
tuple val(meta), path('*.bam') , emit: bam
tuple val(meta), path('*.log') , emit: log
tuple val(meta), path('*fastq.gz'), emit: fastq, optional:true
tuple val(meta), path("*.bam") , emit: bam
tuple val(meta), path("*.log") , emit: log
tuple val(meta), path("*fastq.gz"), emit: fastq, optional:true
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def args2 = task.ext.args2 ?: ''
def args = task.ext.args ?: ""
def args2 = task.ext.args2 ?: ""
def prefix = task.ext.prefix ?: "${meta.id}"
def unaligned = ""
def reads_args = ""
if (meta.single_end) {
def unaligned = save_unaligned ? "--un-gz ${prefix}.unmapped.fastq.gz" : ''
"""
INDEX=`find -L ./ -name "*.rev.1.bt2" | sed 's/.rev.1.bt2//'`
[ -z "\$INDEX" ] && INDEX=`find -L ./ -name "*.rev.1.bt2l" | sed 's/.rev.1.bt2l//'`
[ -z "\$INDEX" ] && echo "BT2 index files not found" 1>&2 && exit 1
bowtie2 \\
-x \$INDEX \\
-U $reads \\
--threads $task.cpus \\
$unaligned \\
$args \\
2> ${prefix}.bowtie2.log \\
| samtools view -@ $task.cpus $args2 -bhS -o ${prefix}.bam -
cat <<-END_VERSIONS > versions.yml
"${task.process}":
bowtie2: \$(echo \$(bowtie2 --version 2>&1) | sed 's/^.*bowtie2-align-s version //; s/ .*\$//')
samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
pigz: \$( pigz --version 2>&1 | sed 's/pigz //g' )
END_VERSIONS
"""
unaligned = save_unaligned ? "--un-gz ${prefix}.unmapped.fastq.gz" : ""
reads_args = "-U ${reads}"
} else {
def unaligned = save_unaligned ? "--un-conc-gz ${prefix}.unmapped.fastq.gz" : ''
"""
INDEX=`find -L ./ -name "*.rev.1.bt2" | sed 's/.rev.1.bt2//'`
[ -z "\$INDEX" ] && INDEX=`find -L ./ -name "*.rev.1.bt2l" | sed 's/.rev.1.bt2l//'`
[ -z "\$INDEX" ] && echo "BT2 index files not found" 1>&2 && exit 1
bowtie2 \\
-x \$INDEX \\
-1 ${reads[0]} \\
-2 ${reads[1]} \\
--threads $task.cpus \\
$unaligned \\
$args \\
2> ${prefix}.bowtie2.log \\
| samtools view -@ $task.cpus $args2 -bhS -o ${prefix}.bam -
if [ -f ${prefix}.unmapped.fastq.1.gz ]; then
mv ${prefix}.unmapped.fastq.1.gz ${prefix}.unmapped_1.fastq.gz
fi
if [ -f ${prefix}.unmapped.fastq.2.gz ]; then
mv ${prefix}.unmapped.fastq.2.gz ${prefix}.unmapped_2.fastq.gz
fi
cat <<-END_VERSIONS > versions.yml
"${task.process}":
bowtie2: \$(echo \$(bowtie2 --version 2>&1) | sed 's/^.*bowtie2-align-s version //; s/ .*\$//')
samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
pigz: \$( pigz --version 2>&1 | sed 's/pigz //g' )
END_VERSIONS
"""
unaligned = save_unaligned ? "--un-conc-gz ${prefix}.unmapped.fastq.gz" : ""
reads_args = "-1 ${reads[0]} -2 ${reads[1]}"
}
def samtools_command = "samtools view -@ $task.cpus --bam --with-header ${args2} > ${prefix}.bam"
"""
INDEX=`find -L ./ -name "*.rev.1.bt2" | sed "s/.rev.1.bt2//"`
[ -z "\$INDEX" ] && INDEX=`find -L ./ -name "*.rev.1.bt2l" | sed "s/.rev.1.bt2l//"`
[ -z "\$INDEX" ] && echo "Bowtie2 index files not found" 1>&2 && exit 1
bowtie2 \\
-x \$INDEX \\
$reads_args \\
--threads $task.cpus \\
$unaligned \\
$args \\
2> ${prefix}.bowtie2.log \\
| $samtools_command
if [ -f ${prefix}.unmapped.fastq.1.gz ]; then
mv ${prefix}.unmapped.fastq.1.gz ${prefix}.unmapped_1.fastq.gz
fi
if [ -f ${prefix}.unmapped.fastq.2.gz ]; then
mv ${prefix}.unmapped.fastq.2.gz ${prefix}.unmapped_2.fastq.gz
fi
cat <<-END_VERSIONS > versions.yml
"${task.process}":
bowtie2: \$(echo \$(bowtie2 --version 2>&1) | sed 's/^.*bowtie2-align-s version //; s/ .*\$//')
samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
pigz: \$( pigz --version 2>&1 | sed 's/pigz //g' )
END_VERSIONS
"""
}
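As a usage note, the refactor keeps the usual nf-core ext pattern: task.ext.args is appended to the bowtie2 call and task.ext.args2 to the samtools view step. An illustrative configuration sketch (option values are placeholders):
process {
    withName: BOWTIE2_ALIGN {
        ext.args   = '--very-sensitive'        // extra bowtie2 options
        ext.args2  = ''                        // extra samtools view options
        ext.prefix = { "${meta.id}.bowtie2" }  // output file prefix
    }
}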

View file

@ -0,0 +1,20 @@
process CUSTOM_SRATOOLSNCBISETTINGS {
tag 'ncbi-settings'
label 'process_low'
conda (params.enable_conda ? 'bioconda::sra-tools=2.11.0' : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/sra-tools:2.11.0--pl5321ha49a11a_3' :
'quay.io/biocontainers/sra-tools:2.11.0--pl5321ha49a11a_3' }"
output:
path('*.mkfg') , emit: ncbi_settings
path 'versions.yml', emit: versions
when:
task.ext.when == null || task.ext.when
shell:
config = "/LIBS/GUID = \"${UUID.randomUUID().toString()}\"\\n/libs/cloud/report_instance_identity = \"true\"\\n"
template 'detect_ncbi_settings.sh'
}

View file

@ -0,0 +1,28 @@
name: "sratoolsncbisettings"
description: Test for the presence of suitable NCBI settings or create them on the fly.
keywords:
- NCBI
- settings
- sra-tools
- prefetch
- fasterq-dump
tools:
- "sratools":
description: "SRA Toolkit and SDK from NCBI"
homepage: https://github.com/ncbi/sra-tools
documentation: https://github.com/ncbi/sra-tools/wiki
tool_dev_url: https://github.com/ncbi/sra-tools
licence: "['Public Domain']"
output:
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- ncbi_settings:
type: file
description: An NCBI user settings file.
pattern: "*.mkfg"
authors:
- "@Midnighter"

View file

@ -0,0 +1,45 @@
#!/usr/bin/env bash
set -u
# Get the expected NCBI settings path and define the environment variable
# `NCBI_SETTINGS`.
eval "$(vdb-config -o n NCBI_SETTINGS | sed 's/[" ]//g')"
# If the user settings do not exist yet, create a file suitable for `prefetch`
# and `fasterq-dump`. If an existing settings file does not contain the required
# values, error out with a helpful message.
if [[ ! -f "${NCBI_SETTINGS}" ]]; then
printf '!{config}' > 'user-settings.mkfg'
else
prefetch --help &> /dev/null
if [[ $? = 78 ]]; then
echo "You have an existing vdb-config at '${NCBI_SETTINGS}' but it is"\
"missing the required entries for /LIBS/GUID and"\
"/libs/cloud/report_instance_identity."\
"Feel free to add the following to your settings file:" >&2
echo "$(printf '!{config}')" >&2
exit 1
fi
fasterq-dump --help &> /dev/null
if [[ $? = 78 ]]; then
echo "You have an existing vdb-config at '${NCBI_SETTINGS}' but it is"\
"missing the required entries for /LIBS/GUID and"\
"/libs/cloud/report_instance_identity."\
"Feel free to add the following to your settings file:" >&2
echo "$(printf '!{config}')" >&2
exit 1
fi
if [[ "${NCBI_SETTINGS}" != *.mkfg ]]; then
echo "The detected settings '${NCBI_SETTINGS}' do not have the required"\
"file extension '.mkfg'." >&2
exit 1
fi
cp "${NCBI_SETTINGS}" ./
fi
cat <<-END_VERSIONS > versions.yml
"!{task.process}":
sratools: $(vdb-config --version 2>&1 | grep -Eo '[0-9.]+')
END_VERSIONS
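For orientation, the !{config} placeholder above expands to the two settings defined in the process, so a freshly written user-settings.mkfg would contain roughly the following (the GUID is a random UUID; the value shown is a placeholder):
/LIBS/GUID = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
/libs/cloud/report_instance_identity = "true"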

View file

@ -0,0 +1,48 @@
process GATK4_SPLITINTERVALS {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::gatk4=4.2.6.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/gatk4:4.2.6.1--hdfd78af_0':
'quay.io/biocontainers/gatk4:4.2.6.1--hdfd78af_0' }"
input:
tuple val(meta), path(intervals)
path(fasta)
path(fasta_fai)
path(dict)
output:
tuple val(meta), path("**.interval_list"), emit: split_intervals
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def reference = fasta ? "--reference $fasta" : ""
def avail_mem = 3
if (!task.memory) {
log.info '[GATK SplitIntervals] Available memory not known - defaulting to 3GB. Specify process memory requirements to change this.'
} else {
avail_mem = task.memory.giga
}
"""
gatk --java-options "-Xmx${avail_mem}g" SplitIntervals \\
--output ${prefix} \\
--intervals $intervals \\
$reference \\
--tmp-dir . \\
$args
cat <<-END_VERSIONS > versions.yml
"${task.process}":
gatk4: \$(echo \$(gatk --version 2>&1) | sed 's/^.*(GATK) v//; s/ .*\$//')
END_VERSIONS
"""
}
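The number of output interval lists is typically controlled through task.ext.args; a hedged configuration sketch using GATK's --scatter-count option (the value is a placeholder):
process {
    withName: GATK4_SPLITINTERVALS {
        ext.args = '--scatter-count 24'
    }
}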

View file

@ -0,0 +1,53 @@
name: gatk4_splitintervals
keywords:
- interval
- bed
tools:
- gatk4:
description: Genome Analysis Toolkit (GATK4)
homepage: https://gatk.broadinstitute.org/hc/en-us
documentation: https://gatk.broadinstitute.org/hc/en-us/categories/360002369672
tool_dev_url: https://github.com/broadinstitute/gatk
doi: "10.1158/1538-7445.AM2017-3590"
licence: ["BSD-3-clause"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test' ]
- interval:
type: file
description: Interval list or BED
pattern: "*.{interval,interval_list,bed}"
- fasta:
type: file
description: Reference FASTA
pattern: "*.{fa,fasta}"
- fasta_fai:
type: file
description: Reference FASTA index
pattern: "*.fai"
- dict:
type: file
description: Reference sequence dictionary
pattern: "*.dict"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test' ]
- split_intervals:
type: file
description: A list of scattered interval lists
pattern: "*.interval_list"
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
authors:
- "@nvnieuwk"

View file

@ -0,0 +1,40 @@
process GENOMESCOPE2 {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::genomescope2=2.0" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/genomescope2:2.0--py310r41hdfd78af_5':
'quay.io/biocontainers/genomescope2:2.0--py310r41hdfd78af_5' }"
input:
tuple val(meta), path(histogram)
output:
tuple val(meta), path("*_linear_plot.png") , emit: linear_plot_png
tuple val(meta), path("*_transformed_linear_plot.png"), emit: transformed_linear_plot_png
tuple val(meta), path("*_log_plot.png") , emit: log_plot_png
tuple val(meta), path("*_transformed_log_plot.png") , emit: transformed_log_plot_png
tuple val(meta), path("*_model.txt") , emit: model
tuple val(meta), path("*_summary.txt") , emit: summary
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
genomescope2 \\
--input $histogram \\
$args \\
--output . \\
--name_prefix $prefix
cat <<-END_VERSIONS > versions.yml
"${task.process}":
genomescope2: \$( genomescope2 -v | sed 's/GenomeScope //' )
END_VERSIONS
"""
}
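GenomeScope2 needs to know at least the k-mer length used to build the histogram, which is passed via task.ext.args; an illustrative sketch (flag names follow the genomescope2 CLI as I recall them, values are placeholders):
process {
    withName: GENOMESCOPE2 {
        ext.args = '--kmer_length 21 --ploidy 2'
    }
}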

View file

@ -0,0 +1,67 @@
name: "genomescope2"
description: Estimate genome heterozygosity, repeat content, and size from sequencing reads using a kmer-based statistical approach
keywords:
- "genome size"
- "genome heterozygosity"
- "repeat content"
tools:
- "genomescope2":
description: "Reference-free profiling of polyploid genomes"
homepage: "http://qb.cshl.edu/genomescope/genomescope2.0/"
documentation: "https://github.com/tbenavi1/genomescope2.0/blob/master/README.md"
tool_dev_url: "https://github.com/tbenavi1/genomescope2.0"
doi: "https://doi.org/10.1038/s41467-020-14998-3"
licence: "['Apache License, Version 2.0 (Apache-2.0)']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- histogram:
type: file
description: A K-mer histogram file
pattern: "*.hist"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- linear_plot_png:
type: file
description: A genomescope2 linear plot in PNG format
pattern: "*_linear_plot.png"
- transformed_linear_plot_png:
type: file
description: A genomescope2 transformed linear plot in PNG format
pattern: "*_transformed_linear_plot.png"
- log_plot_png:
type: file
description: A genomescope2 log plot in PNG format
pattern: "*_log_plot.png"
- transformed_log_plot_png:
type: file
description: A genomescope2 transformed log plot in PNG format
pattern: "*_transformed_log_plot.png"
- model:
type: file
description: Genomescope2 model fit summary
pattern: "*_model.txt"
- summary:
type: file
description: Genomescope2 histogram summary
pattern: "*_summary.txt"
authors:
- "@mahesh-panchal"

View file

@ -0,0 +1,37 @@
process MERYL_COUNT {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::meryl=1.3" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/meryl:1.3--h87f3376_1':
'quay.io/biocontainers/meryl:1.3--h87f3376_1' }"
input:
tuple val(meta), path(reads)
output:
tuple val(meta), path("*.meryldb"), emit: meryl_db
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
for READ in $reads; do
meryl count \\
threads=$task.cpus \\
$args \\
\$READ \\
output read.\${READ%.f*}.meryldb
done
cat <<-END_VERSIONS > versions.yml
"${task.process}":
meryl: \$( meryl --version |& sed 's/meryl //' )
END_VERSIONS
"""
}
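meryl takes the k-mer size as a k=<N> assignment on its command line, so a typical (illustrative) configuration for this module would be:
process {
    withName: MERYL_COUNT {
        ext.args = 'k=21'
    }
}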

View file

@ -0,0 +1,43 @@
name: "meryl_count"
description: A genomic k-mer counter (and sequence utility) with nice features.
keywords:
- k-mer
- count
tools:
- "meryl":
description: "A genomic k-mer counter (and sequence utility) with nice features. "
homepage: "https://github.com/marbl/meryl"
documentation: "https://meryl.readthedocs.io/en/latest/quick-start.html"
tool_dev_url: "https://github.com/marbl/meryl"
doi: ""
licence: "['GPL']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- reads:
type: file
description: |
List of input FastQ files of size 1 and 2 for single-end and paired-end data,
respectively.
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- meryl_db:
type: directory
description: A Meryl k-mer database
pattern: "*.meryldb"
authors:
- "@mahesh-panchal"

View file

@ -0,0 +1,34 @@
process MERYL_HISTOGRAM {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::meryl=1.3" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/meryl:1.3--h87f3376_1':
'quay.io/biocontainers/meryl:1.3--h87f3376_1' }"
input:
tuple val(meta), path(meryl_db)
output:
tuple val(meta), path("*.hist"), emit: hist
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
meryl histogram \\
threads=$task.cpus \\
$args \\
$meryl_db > ${prefix}.hist
cat <<-END_VERSIONS > versions.yml
"${task.process}":
meryl: \$( meryl --version |& sed 's/meryl //' )
END_VERSIONS
"""
}

View file

@ -0,0 +1,41 @@
name: "meryl_histogram"
description: A genomic k-mer counter (and sequence utility) with nice features.
keywords:
- k-mer
- histogram
tools:
- "meryl":
description: "A genomic k-mer counter (and sequence utility) with nice features. "
homepage: "https://github.com/marbl/meryl"
documentation: "https://meryl.readthedocs.io/en/latest/quick-start.html"
tool_dev_url: "https://github.com/marbl/meryl"
doi: ""
licence: "['GPL']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- meryl_dbs:
type: directory
description: Meryl k-mer database
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- hist:
type: file
description: Histogram of k-mers
pattern: "*.hist"
authors:
- "@mahesh-panchal"

View file

@ -0,0 +1,35 @@
process MERYL_UNIONSUM {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::meryl=1.3" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/meryl:1.3--h87f3376_1':
'quay.io/biocontainers/meryl:1.3--h87f3376_1' }"
input:
tuple val(meta), path(meryl_dbs)
output:
tuple val(meta), path("*.unionsum.meryldb"), emit: meryl_db
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
meryl union-sum \\
threads=$task.cpus \\
$args \\
output ${prefix}.unionsum.meryldb \\
$meryl_dbs
cat <<-END_VERSIONS > versions.yml
"${task.process}":
meryl: \$( meryl --version |& sed 's/meryl //' )
END_VERSIONS
"""
}

View file

@ -0,0 +1,41 @@
name: "meryl_unionsum"
description: A genomic k-mer counter (and sequence utility) with nice features.
keywords:
- k-mer
- unionsum
tools:
- "meryl":
description: "A genomic k-mer counter (and sequence utility) with nice features. "
homepage: "https://github.com/marbl/meryl"
documentation: "https://meryl.readthedocs.io/en/latest/quick-start.html"
tool_dev_url: "https://github.com/marbl/meryl"
doi: ""
licence: "['GPL']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- meryl_dbs:
type: directory
description: Meryl k-mer databases
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- meryl_db:
type: directory
description: A Meryl k-mer database that is the union sum of the input databases
pattern: "*.unionsum.meryldb"
authors:
- "@mahesh-panchal"

View file

@ -0,0 +1,60 @@
process RTGTOOLS_VCFEVAL {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::rtg-tools=3.12.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/rtg-tools:3.12.1--hdfd78af_0':
'quay.io/biocontainers/rtg-tools:3.12.1--hdfd78af_0' }"
input:
tuple val(meta), path(query_vcf), path(query_vcf_tbi)
tuple path(truth_vcf), path(truth_vcf_tbi)
path(truth_regions)
path(evaluation_regions)
path(sdf)
output:
tuple val(meta), path("**results/{done,progress,*.log}") , emit: logs
tuple val(meta), path("**tp.vcf.gz"), path("**tp.vcf.gz.tbi") , emit: tp
tuple val(meta), path("**fn.vcf.gz"), path("**fn.vcf.gz.tbi") , emit: fn
tuple val(meta), path("**fp.vcf.gz"), path("**fp.vcf.gz.tbi") , emit: fp
tuple val(meta), path("**baseline.vcf.gz"), path("**baseline.vcf.gz.tbi") , emit: baseline
tuple val(meta), path("**.tsv.gz") , emit: roc
tuple val(meta), path("**results/summary.txt") , emit: summary
tuple val(meta), path("**results/phasing.txt") , emit: phasing
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ""
def prefix = task.ext.prefix ?: "${meta.id}"
def bed_regions = truth_regions ? "--bed-regions=$truth_regions" : ""
def eval_regions = evaluation_regions ? "--evaluation-regions=$evaluation_regions" : ""
def truth_index = truth_vcf_tbi ? "" : "rtg index $truth_vcf"
def query_index = query_vcf_tbi ? "" : "rtg index $query_vcf"
def avail_mem = task.memory.toGiga() + "G"
"""
$truth_index
$query_index
rtg RTG_MEM=$avail_mem vcfeval \\
$args \\
--baseline=$truth_vcf \\
$bed_regions \\
$eval_regions \\
--calls=$query_vcf \\
--output=${prefix}_results \\
--template=$sdf \\
--threads=$task.cpus
cat <<-END_VERSIONS > versions.yml
"${task.process}":
rtg-tools: \$(echo \$(rtg version | head -n 1 | awk '{print \$4}'))
END_VERSIONS
"""
}
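For context, a hedged wiring sketch for the five inputs of this module; the include path and file names are placeholders, and the SDF folder is assumed to have been prepared beforehand with RTG's rtg format command:
include { RTGTOOLS_VCFEVAL } from './modules/rtgtools/vcfeval/main'
workflow {
    query_ch = Channel.of([ [ id:'sample1' ], file('sample1.vcf.gz'), file('sample1.vcf.gz.tbi') ])
    truth    = [ file('truth.vcf.gz'), file('truth.vcf.gz.tbi') ]
    RTGTOOLS_VCFEVAL (
        query_ch,                        // called VCF + index
        truth,                           // baseline VCF + index
        file('truth_regions.bed'),       // becomes --bed-regions
        file('evaluation_regions.bed'),  // becomes --evaluation-regions
        file('genome_sdf')               // reference in RTG SDF format
    )
}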

View file

@ -0,0 +1,95 @@
name: "rtgtools_vcfeval"
description: The VCFeval tool of RTG tools. It is used to evaluate called variants for agreement with a baseline variant set
keywords:
- benchmarking
- vcf
- rtg-tools
tools:
- "rtgtools":
description: "RealTimeGenomics Tools -- Utilities for accurate VCF comparison and manipulation"
homepage: "https://www.realtimegenomics.com/products/rtg-tools"
documentation: "https://github.com/RealTimeGenomics/rtg-tools"
tool_dev_url: "https://github.com/RealTimeGenomics/rtg-tools"
doi: ""
licence: "['BSD']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- truth_vcf:
type: file
description: A standard VCF to compare against
pattern: "*.{vcf,vcf.gz}"
- truth_vcf_index:
type: file
description: The index of the standard VCF (optional)
pattern: "*.tbi"
- query_vcf:
type: file
description: A VCF with called variants to benchmark against the standard
pattern: "*.{vcf,vcf.gz}"
- query_vcf_index:
type: file
description: The index of the called VCF (optional)
pattern: "*.tbi"
- truth_regions:
type: file
description: A BED file containing the strict regions where VCFeval should only evaluate the fully overlapping variants (optional)
pattern: "*.bed"
- evaluation_regions:
type: file
description: A BED file containing the regions where VCFeval will evaluate every fully and partially overlapping variant (optional)
pattern: "*.bed"
- sdf:
type: file
description: The SDF (RTG Sequence Data File) folder of the reference genome
pattern: "*"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- logging:
type: file
description: Files containing logging from vcfeval
pattern: "*{done,progress,.log}"
- tp:
type: file
description: A tuple containing the VCF and TBI file for the true positive variants
pattern: "tp.vcf{.gz,.gz.tbi}"
- baseline:
type: file
description: A tuple containing the VCF and TBI file for the baseline true positive variants
pattern: "tp-baseline.vcf{.gz,.gz.tbi}"
- fp:
type: file
description: A tuple containing the VCF and TBI file for the false positive variants
pattern: "fp.vcf{.gz,.gz.tbi}"
- fn:
type: file
description: A tuple containing the VCF and TBI file for the false negative variants
pattern: "fn.vcf{.gz,.gz.tbi}"
- roc:
type: file
description: TSV files containing ROC data for the evaluated variants
pattern: "*.tsv.gz"
- summary:
type: file
description: A TXT file containing the summary of the evaluation
pattern: "summary.txt"
- phasing:
type: file
description: A TXT file containing the data on the phasing
pattern: "phasing.txt"
authors:
- "@nvnieuwk"

View file

@ -1,5 +1,4 @@
//There is a -L option to only output alignments in interval, might be an option for exons/panel data?
process SAMTOOLS_BAMTOCRAM {
process SAMTOOLS_CONVERT {
tag "$meta.id"
label 'process_medium'
@ -14,8 +13,8 @@ process SAMTOOLS_BAMTOCRAM {
path fai
output:
tuple val(meta), path("*.cram"), path("*.crai"), emit: cram_crai
path "versions.yml" , emit: versions
tuple val(meta), path("*.{cram,bam}"), path("*.{crai,bai}") , emit: alignment_index
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
@ -23,9 +22,17 @@ process SAMTOOLS_BAMTOCRAM {
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def output_extension = input.getExtension() == "bam" ? "cram" : "bam"
"""
samtools view --threads ${task.cpus} --reference ${fasta} -C $args $input > ${prefix}.cram
samtools index -@${task.cpus} ${prefix}.cram
samtools view \\
--threads ${task.cpus} \\
--reference ${fasta} \\
$args \\
$input \\
-o ${prefix}.${output_extension}
samtools index -@${task.cpus} ${prefix}.${output_extension}
cat <<-END_VERSIONS > versions.yml
"${task.process}":

View file

@ -1,5 +1,5 @@
name: samtools_bamtocram
description: filter/convert and then index CRAM file
name: samtools_convert
description: convert and then index CRAM -> BAM or BAM -> CRAM file
keywords:
- view
- index
@ -23,12 +23,12 @@ input:
e.g. [ id:'test', single_end:false ]
- input:
type: file
description: BAM/SAM file
pattern: "*.{bam,sam}"
description: BAM/CRAM file
pattern: "*.{bam,cram}"
- index:
type: file
description: BAM/SAM index file
pattern: "*.{bai,sai}"
description: BAM/CRAM index file
pattern: "*.{bai,crai}"
- fasta:
type: file
description: Reference file to create the CRAM file
@ -39,10 +39,10 @@ output:
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- cram_crai:
- alignment_index:
type: file
description: filtered/converted CRAM file + index
pattern: "*{.cram,.crai}"
description: filtered/converted BAM/CRAM file + index
pattern: "*{.bam/cram,.bai/crai}"
- version:
type: file
description: File containing software version

View file

@ -9,6 +9,7 @@ process SRATOOLS_FASTERQDUMP {
input:
tuple val(meta), path(sra)
path ncbi_settings
output:
tuple val(meta), path(output), emit: reads
@ -20,17 +21,12 @@ process SRATOOLS_FASTERQDUMP {
script:
def args = task.ext.args ?: ''
def args2 = task.ext.args2 ?: ''
def config = "/LIBS/GUID = \"${UUID.randomUUID().toString()}\"\\n/libs/cloud/report_instance_identity = \"true\"\\n"
// Paired-end data extracted by fasterq-dump (--split-3 the default) always creates
// *_1.fastq *_2.fastq files but sometimes also an additional *.fastq file
// for unpaired reads which we ignore here.
output = meta.single_end ? '*.fastq.gz' : '*_{1,2}.fastq.gz'
"""
eval "\$(vdb-config -o n NCBI_SETTINGS | sed 's/[" ]//g')"
if [[ ! -f "\${NCBI_SETTINGS}" ]]; then
mkdir -p "\$(dirname "\${NCBI_SETTINGS}")"
printf '${config}' > "\${NCBI_SETTINGS}"
fi
export NCBI_SETTINGS="\$PWD/${ncbi_settings}"
fasterq-dump \\
$args \\

View file

@ -10,7 +10,7 @@ tools:
homepage: https://github.com/ncbi/sra-tools
documentation: https://github.com/ncbi/sra-tools/wiki
tool_dev_url: https://github.com/ncbi/sra-tools
licence: ["US-Government-Work"]
licence: ["Public Domain"]
input:
- meta:
@ -22,6 +22,11 @@ input:
type: directory
description: Directory containing ETL data for the given SRA.
pattern: "*/*.sra"
- ncbi_settings:
type: file
description: >
An NCBI user settings file.
pattern: "*.mkfg"
output:
- meta:

View file

@ -9,10 +9,11 @@ process SRATOOLS_PREFETCH {
input:
tuple val(meta), val(id)
path ncbi_settings
output:
tuple val(meta), path(id), emit: sra
path "versions.yml" , emit: versions
path 'versions.yml' , emit: versions
when:
task.ext.when == null || task.ext.when
@ -20,7 +21,5 @@ process SRATOOLS_PREFETCH {
shell:
args = task.ext.args ?: ''
args2 = task.ext.args2 ?: '5 1 100' // <num retries> <base delay in seconds> <max delay in seconds>
config = "/LIBS/GUID = \"${UUID.randomUUID().toString()}\"\\n/libs/cloud/report_instance_identity = \"true\"\\n"
template 'retry_with_backoff.sh'
}

View file

@ -10,7 +10,7 @@ tools:
homepage: https://github.com/ncbi/sra-tools
documentation: https://github.com/ncbi/sra-tools/wiki
tool_dev_url: https://github.com/ncbi/sra-tools
licence: ["US-Government-Work"]
licence: ["Public Domain"]
input:
- meta:
@ -22,6 +22,11 @@ input:
type: val
description: >
A string denoting an SRA id.
- ncbi_settings:
type: file
description: >
An NCBI user settings file.
pattern: "*.mkfg"
output:
- meta:

View file

@ -40,11 +40,7 @@ retry_with_backoff() {
echo "${output}"
}
eval "$(vdb-config -o n NCBI_SETTINGS | sed 's/[" ]//g')"
if [[ ! -f "${NCBI_SETTINGS}" ]]; then
mkdir -p "$(dirname "${NCBI_SETTINGS}")"
printf '!{config}' > "${NCBI_SETTINGS}"
fi
export NCBI_SETTINGS="$PWD/!{ncbi_settings}"
retry_with_backoff !{args2} \
prefetch \

View file

@ -1,34 +0,0 @@
//
// Download FASTQ sequencing reads from the NCBI's Sequence Read Archive (SRA).
//
params.prefetch_options = [:]
params.fasterqdump_options = [:]
include { SRATOOLS_PREFETCH } from '../../../modules/sratools/prefetch/main' addParams( options: params.prefetch_options )
include { SRATOOLS_FASTERQDUMP } from '../../../modules/sratools/fasterqdump/main' addParams( options: params.fasterqdump_options )
workflow SRA_FASTQ {
take:
sra_ids // channel: [ val(meta), val(id) ]
main:
ch_versions = Channel.empty()
//
// Prefetch sequencing reads in SRA format.
//
SRATOOLS_PREFETCH ( sra_ids )
ch_versions = ch_versions.mix( SRATOOLS_PREFETCH.out.versions.first() )
//
// Convert the SRA format into one or more compressed FASTQ files.
//
SRATOOLS_FASTERQDUMP ( SRATOOLS_PREFETCH.out.sra )
ch_versions = ch_versions.mix( SRATOOLS_FASTERQDUMP.out.versions.first() )
emit:
reads = SRATOOLS_FASTERQDUMP.out.reads // channel: [ val(meta), [ reads ] ]
versions = ch_versions // channel: [ versions.yml ]
}

View file

@ -0,0 +1,38 @@
include { CUSTOM_SRATOOLSNCBISETTINGS } from '../../../modules/custom/sratoolsncbisettings/main'
include { SRATOOLS_PREFETCH } from '../../../modules/sratools/prefetch/main'
include { SRATOOLS_FASTERQDUMP } from '../../../modules/sratools/fasterqdump/main'
/**
* Download FASTQ sequencing reads from the NCBI's Sequence Read Archive (SRA).
*/
workflow SRAFASTQ {
take:
sra_ids // channel: [ val(meta), val(id) ]
main:
ch_versions = Channel.empty()
//
// Detect existing NCBI user settings or create new ones.
//
CUSTOM_SRATOOLSNCBISETTINGS()
def settings = CUSTOM_SRATOOLSNCBISETTINGS.out.ncbi_settings
ch_versions = ch_versions.mix( CUSTOM_SRATOOLSNCBISETTINGS.out.versions )
//
// Prefetch sequencing reads in SRA format.
//
SRATOOLS_PREFETCH ( sra_ids, settings )
ch_versions = ch_versions.mix( SRATOOLS_PREFETCH.out.versions.first() )
//
// Convert the SRA format into one or more compressed FASTQ files.
//
SRATOOLS_FASTERQDUMP ( SRATOOLS_PREFETCH.out.sra, settings )
ch_versions = ch_versions.mix( SRATOOLS_FASTERQDUMP.out.versions.first() )
emit:
reads = SRATOOLS_FASTERQDUMP.out.reads // channel: [ val(meta), [ reads ] ]
versions = ch_versions // channel: [ versions.yml ]
}
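A minimal usage sketch for the renamed subworkflow; the include path and SRA run accessions are placeholders:
include { SRAFASTQ } from './subworkflows/srafastq/main'
workflow {
    sra_ids = Channel.of(
        [ [ id:'SRR0000001' ], 'SRR0000001' ],
        [ [ id:'SRR0000002' ], 'SRR0000002' ]
    )
    SRAFASTQ ( sra_ids )
    SRAFASTQ.out.reads.view()
}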

View file

@ -1,11 +1,14 @@
name: sra_fastq
description: Download FASTQ sequencing reads from the NCBI's Sequence Read Archive (SRA).
keywords:
- SRA
- NCBI
- sequencing
- FASTQ
- prefetch
- dump
- fasterq-dump
modules:
- custom/sratoolsncbisettings
- sratools/prefetch
- sratools/fasterqdump
input:
@ -17,7 +20,7 @@ input:
- id:
type: string
description: >
SRA identifier.
SRA run identifier.
# TODO Update when we decide on a standard for subworkflow docs
output:
- meta:

View file

@ -495,6 +495,10 @@ custom/getchromsizes:
- modules/custom/getchromsizes/**
- tests/modules/custom/getchromsizes/**
custom/sratoolsncbisettings:
- modules/custom/sratoolsncbisettings/**
- tests/modules/custom/sratoolsncbisettings/**
cutadapt:
- modules/cutadapt/**
- tests/modules/cutadapt/**
@ -815,6 +819,10 @@ gatk4/selectvariants:
- modules/gatk4/selectvariants/**
- tests/modules/gatk4/selectvariants/**
gatk4/splitintervals:
- modules/gatk4/splitintervals/**
- tests/modules/gatk4/splitintervals/**
gatk4/splitncigarreads:
- modules/gatk4/splitncigarreads/**
- tests/modules/gatk4/splitncigarreads/**
@ -835,6 +843,10 @@ genmap/mappability:
- modules/genmap/mappability/**
- tests/modules/genmap/mappability/**
genomescope2:
- modules/genomescope2/**
- tests/modules/genomescope2/**
genrich:
- modules/genrich/**
- tests/modules/genrich/**
@ -1214,6 +1226,18 @@ meningotype:
- modules/meningotype/**
- tests/modules/meningotype/**
meryl/count:
- modules/meryl/count/**
- tests/modules/meryl/count/**
meryl/histogram:
- modules/meryl/histogram/**
- tests/modules/meryl/histogram/**
meryl/unionsum:
- modules/meryl/unionsum/**
- tests/modules/meryl/unionsum/**
metabat2/jgisummarizebamcontigdepths:
- modules/metabat2/jgisummarizebamcontigdepths/**
- tests/modules/metabat2/jgisummarizebamcontigdepths/**
@ -1611,6 +1635,10 @@ rseqc/tin:
- modules/rseqc/tin/**
- tests/modules/rseqc/tin/**
rtgtools/vcfeval:
- modules/rtgtools/vcfeval/**
- tests/modules/rtgtools/vcfeval/**
salmon/index:
- modules/salmon/index/**
- tests/modules/salmon/index/**
@ -1631,14 +1659,14 @@ samtools/bam2fq:
- modules/samtools/bam2fq/**
- tests/modules/samtools/bam2fq/**
samtools/bamtocram:
- modules/samtools/bamtocram/**
- tests/modules/samtools/bamtocram/**
samtools/collatefastq:
- modules/samtools/collatefastq/**
- tests/modules/samtools/collatefastq/**
samtools/convert:
- modules/samtools/convert/**
- tests/modules/samtools/convert/**
samtools/depth:
- modules/samtools/depth/**
- tests/modules/samtools/depth/**
@ -1767,14 +1795,14 @@ slimfastq:
- modules/slimfastq/**
- tests/modules/slimfastq/**
snapaligner/index:
- modules/snapaligner/index/**
- tests/modules/snapaligner/index/**
snapaligner/align:
- modules/snapaligner/align/**
- tests/modules/snapaligner/align/**
snapaligner/index:
- modules/snapaligner/index/**
- tests/modules/snapaligner/index/**
snpdists:
- modules/snpdists/**
- tests/modules/snpdists/**

View file

@ -135,6 +135,7 @@ params {
transcriptome_fasta = "${test_data_dir}/genomics/homo_sapiens/genome/transcriptome.fasta"
genome2_fasta = "${test_data_dir}/genomics/homo_sapiens/genome/genome2.fasta"
genome_chain_gz = "${test_data_dir}/genomics/homo_sapiens/genome/genome.chain.gz"
genome_21_sdf = "${test_data_dir}/genomics/homo_sapiens/genome/chr21/sequence/genome_sdf.tar.gz"
genome_21_fasta = "${test_data_dir}/genomics/homo_sapiens/genome/chr21/sequence/genome.fasta"
genome_21_fasta_fai = "${test_data_dir}/genomics/homo_sapiens/genome/chr21/sequence/genome.fasta.fai"
genome_21_dict = "${test_data_dir}/genomics/homo_sapiens/genome/chr21/sequence/genome.dict"
@ -212,110 +213,113 @@ params {
test_paired_end_hla = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/example_hla_pe.bam"
test_paired_end_hla_sorted_bam = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/example_hla_pe.sorted.bam"
test_paired_end_hla_sorted_bam_bai = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/example_hla_pe.sorted.bam.bai"
test2_paired_end_sorted_bam = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/test2.paired_end.sorted.bam"
test2_paired_end_sorted_bam_bai = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/test2.paired_end.sorted.bam.bai"
test2_paired_end_name_sorted_bam = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/test2.paired_end.name.sorted.bam"
test2_paired_end_markduplicates_sorted_bam = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/test2.paired_end.markduplicates.sorted.bam"
test2_paired_end_markduplicates_sorted_bam_bai = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/test2.paired_end.markduplicates.sorted.bam.bai"
test2_paired_end_recalibrated_sorted_bam = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/test2.paired_end.recalibrated.sorted.bam"
test2_paired_end_recalibrated_sorted_bam_bai = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/test2.paired_end.recalibrated.sorted.bam.bai"
test2_paired_end_umi_consensus_bam = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/umi/test2.paired_end.umi_consensus.bam"
test2_paired_end_umi_converted_bam = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/umi/test2.paired_end.umi_converted.bam"
test2_paired_end_umi_grouped_bam = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/umi/test2.paired_end.umi_grouped.bam"
test2_paired_end_umi_histogram_txt = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/umi/test2.paired_end.umi_histogram.txt"
test2_paired_end_umi_unsorted_bam = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/umi/test2.paired_end.umi_unsorted.bam"
test2_paired_end_umi_unsorted_tagged_bam = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/umi/test2.paired_end.unsorted_tagged.bam"
test2_paired_end_sorted_bam = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/test2.paired_end.sorted.bam"
test2_paired_end_sorted_bam_bai = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/test2.paired_end.sorted.bam.bai"
test2_paired_end_name_sorted_bam = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/test2.paired_end.name.sorted.bam"
test2_paired_end_markduplicates_sorted_bam = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/test2.paired_end.markduplicates.sorted.bam"
test2_paired_end_markduplicates_sorted_bam_bai = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/test2.paired_end.markduplicates.sorted.bam.bai"
test2_paired_end_recalibrated_sorted_bam = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/test2.paired_end.recalibrated.sorted.bam"
test2_paired_end_recalibrated_sorted_bam_bai = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/test2.paired_end.recalibrated.sorted.bam.bai"
test2_paired_end_umi_consensus_bam = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/umi/test2.paired_end.umi_consensus.bam"
test2_paired_end_umi_converted_bam = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/umi/test2.paired_end.umi_converted.bam"
test2_paired_end_umi_grouped_bam = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/umi/test2.paired_end.umi_grouped.bam"
test2_paired_end_umi_histogram_txt = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/umi/test2.paired_end.umi_histogram.txt"
test2_paired_end_umi_unsorted_bam = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/umi/test2.paired_end.umi_unsorted.bam"
test2_paired_end_umi_unsorted_tagged_bam = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/umi/test2.paired_end.unsorted_tagged.bam"
mitochon_standin_recalibrated_sorted_bam = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/mitochon_standin.recalibrated.sorted.bam"
mitochon_standin_recalibrated_sorted_bam_bai = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/mitochon_standin.recalibrated.sorted.bam.bai"
mitochon_standin_recalibrated_sorted_bam = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/mitochon_standin.recalibrated.sorted.bam"
mitochon_standin_recalibrated_sorted_bam_bai = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/mitochon_standin.recalibrated.sorted.bam.bai"
test_paired_end_sorted_cram = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test.paired_end.sorted.cram"
test_paired_end_sorted_cram_crai = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test.paired_end.sorted.cram.crai"
test_paired_end_markduplicates_sorted_cram = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test.paired_end.markduplicates.sorted.cram"
test_paired_end_markduplicates_sorted_cram_crai = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test.paired_end.markduplicates.sorted.cram.crai"
test_paired_end_recalibrated_sorted_cram = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test.paired_end.recalibrated.sorted.cram"
test_paired_end_recalibrated_sorted_cram_crai = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test.paired_end.recalibrated.sorted.cram.crai"
test_paired_end_sorted_cram = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test.paired_end.sorted.cram"
test_paired_end_sorted_cram_crai = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test.paired_end.sorted.cram.crai"
test_paired_end_markduplicates_sorted_cram = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test.paired_end.markduplicates.sorted.cram"
test_paired_end_markduplicates_sorted_cram_crai = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test.paired_end.markduplicates.sorted.cram.crai"
test_paired_end_recalibrated_sorted_cram = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test.paired_end.recalibrated.sorted.cram"
test_paired_end_recalibrated_sorted_cram_crai = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test.paired_end.recalibrated.sorted.cram.crai"
test2_paired_end_sorted_cram = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test2.paired_end.sorted.cram"
test2_paired_end_sorted_cram_crai = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test2.paired_end.sorted.cram.crai"
test2_paired_end_markduplicates_sorted_cram = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test2.paired_end.markduplicates.sorted.cram"
test2_paired_end_markduplicates_sorted_cram_crai = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test2.paired_end.markduplicates.sorted.cram.crai"
test2_paired_end_recalibrated_sorted_cram = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test2.paired_end.recalibrated.sorted.cram"
test2_paired_end_recalibrated_sorted_cram_crai = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test2.paired_end.recalibrated.sorted.cram.crai"
test2_paired_end_sorted_cram = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test2.paired_end.sorted.cram"
test2_paired_end_sorted_cram_crai = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test2.paired_end.sorted.cram.crai"
test2_paired_end_markduplicates_sorted_cram = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test2.paired_end.markduplicates.sorted.cram"
test2_paired_end_markduplicates_sorted_cram_crai = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test2.paired_end.markduplicates.sorted.cram.crai"
test2_paired_end_recalibrated_sorted_cram = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test2.paired_end.recalibrated.sorted.cram"
test2_paired_end_recalibrated_sorted_cram_crai = "${test_data_dir}/genomics/homo_sapiens/illumina/cram/test2.paired_end.recalibrated.sorted.cram.crai"
test_1_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test_1.fastq.gz"
test_2_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test_2.fastq.gz"
test_umi_1_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test.umi_1.fastq.gz"
test_umi_2_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test.umi_2.fastq.gz"
test2_1_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test2_1.fastq.gz"
test2_2_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test2_2.fastq.gz"
test2_umi_1_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test2.umi_1.fastq.gz"
test2_umi_2_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test2.umi_2.fastq.gz"
test_rnaseq_1_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test_rnaseq_1.fastq.gz"
test_rnaseq_2_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test_rnaseq_2.fastq.gz"
test_1_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test_1.fastq.gz"
test_2_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test_2.fastq.gz"
test_umi_1_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test.umi_1.fastq.gz"
test_umi_2_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test.umi_2.fastq.gz"
test2_1_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test2_1.fastq.gz"
test2_2_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test2_2.fastq.gz"
test2_umi_1_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test2.umi_1.fastq.gz"
test2_umi_2_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test2.umi_2.fastq.gz"
test_rnaseq_1_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test_rnaseq_1.fastq.gz"
test_rnaseq_2_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test_rnaseq_2.fastq.gz"
test_baserecalibrator_table = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/test.baserecalibrator.table"
test2_baserecalibrator_table = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/test2.baserecalibrator.table"
test_pileups_table = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/test.pileups.table"
test2_pileups_table = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/test2.pileups.table"
test_baserecalibrator_table = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/test.baserecalibrator.table"
test2_baserecalibrator_table = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/test2.baserecalibrator.table"
test_pileups_table = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/test.pileups.table"
test2_pileups_table = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/test2.pileups.table"
test_genomicsdb_tar_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/test_genomicsdb.tar.gz"
test_pon_genomicsdb_tar_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/test_pon_genomicsdb.tar.gz"
test_genomicsdb_tar_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/test_genomicsdb.tar.gz"
test_pon_genomicsdb_tar_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/test_pon_genomicsdb.tar.gz"
test2_haplotc_ann_vcf_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/haplotypecaller_calls/test2_haplotc.ann.vcf.gz"
test2_haplotc_ann_vcf_gz_tbi = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/haplotypecaller_calls/test2_haplotc.ann.vcf.gz.tbi"
test2_haplotc_ann_vcf_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/haplotypecaller_calls/test2_haplotc.ann.vcf.gz"
test2_haplotc_ann_vcf_gz_tbi = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/haplotypecaller_calls/test2_haplotc.ann.vcf.gz.tbi"
test2_recal = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/variantrecalibrator/test2.recal"
test2_recal_idx = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/variantrecalibrator/test2.recal.idx"
test2_tranches = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/variantrecalibrator/test2.tranches"
test2_allele_specific_recal = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/variantrecalibrator/test2_allele_specific.recal"
test2_allele_specific_recal_idx = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/variantrecalibrator/test2_allele_specific.recal.idx"
test2_allele_specific_tranches = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/variantrecalibrator/test2_allele_specific.tranches"
test2_haplotc_vcf_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/haplotypecaller_calls/test2_haplotc.vcf.gz"
test2_haplotc_vcf_gz_tbi = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/haplotypecaller_calls/test2_haplotc.vcf.gz.tbi"
test_test2_paired_mutect2_calls_vcf_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/paired_mutect2_calls/test_test2_paired_mutect2_calls.vcf.gz"
test_test2_paired_mutect2_calls_vcf_gz_tbi = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/paired_mutect2_calls/test_test2_paired_mutect2_calls.vcf.gz.tbi"
test_test2_paired_mutect2_calls_vcf_gz_stats = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/paired_mutect2_calls/test_test2_paired_mutect2_calls.vcf.gz.stats"
test_test2_paired_mutect2_calls_f1r2_tar_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/paired_mutect2_calls/test_test2_paired_mutect2_calls.f1r2.tar.gz"
test_test2_paired_mutect2_calls_artifact_prior_tar_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/test_test2_paired_mutect2_calls.artifact-prior.tar.gz"
test_test2_paired_segmentation_table = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/test_test2_paired.segmentation.table"
test_test2_paired_contamination_table = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/test_test2_paired.contamination.table"
test2_recal = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/variantrecalibrator/test2.recal"
test2_recal_idx = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/variantrecalibrator/test2.recal.idx"
test2_tranches = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/variantrecalibrator/test2.tranches"
test2_allele_specific_recal = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/variantrecalibrator/test2_allele_specific.recal"
test2_allele_specific_recal_idx = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/variantrecalibrator/test2_allele_specific.recal.idx"
test2_allele_specific_tranches = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/variantrecalibrator/test2_allele_specific.tranches"
test_genome_vcf = "${test_data_dir}/genomics/homo_sapiens/illumina/gvcf/test.genome.vcf"
test_genome_vcf_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/gvcf/test.genome.vcf.gz"
test_genome_vcf_gz_tbi = "${test_data_dir}/genomics/homo_sapiens/illumina/gvcf/test.genome.vcf.gz.tbi"
test_genome_vcf_idx = "${test_data_dir}/genomics/homo_sapiens/illumina/gvcf/test.genome.vcf.idx"
test_test2_paired_mutect2_calls_vcf_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/paired_mutect2_calls/test_test2_paired_mutect2_calls.vcf.gz"
test_test2_paired_mutect2_calls_vcf_gz_tbi = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/paired_mutect2_calls/test_test2_paired_mutect2_calls.vcf.gz.tbi"
test_test2_paired_mutect2_calls_vcf_gz_stats = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/paired_mutect2_calls/test_test2_paired_mutect2_calls.vcf.gz.stats"
test_test2_paired_mutect2_calls_f1r2_tar_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/paired_mutect2_calls/test_test2_paired_mutect2_calls.f1r2.tar.gz"
test_test2_paired_mutect2_calls_artifact_prior_tar_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/test_test2_paired_mutect2_calls.artifact-prior.tar.gz"
test_test2_paired_segmentation_table = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/test_test2_paired.segmentation.table"
test_test2_paired_contamination_table = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/test_test2_paired.contamination.table"
test2_genome_vcf = "${test_data_dir}/genomics/homo_sapiens/illumina/gvcf/test2.genome.vcf"
test2_genome_vcf_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/gvcf/test2.genome.vcf.gz"
test2_genome_vcf_gz_tbi = "${test_data_dir}/genomics/homo_sapiens/illumina/gvcf/test2.genome.vcf.gz.tbi"
test2_genome_vcf_idx = "${test_data_dir}/genomics/homo_sapiens/illumina/gvcf/test2.genome.vcf.idx"
test_genome_vcf = "${test_data_dir}/genomics/homo_sapiens/illumina/gvcf/test.genome.vcf"
test_genome_vcf_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/gvcf/test.genome.vcf.gz"
test_genome_vcf_gz_tbi = "${test_data_dir}/genomics/homo_sapiens/illumina/gvcf/test.genome.vcf.gz.tbi"
test_genome_vcf_idx = "${test_data_dir}/genomics/homo_sapiens/illumina/gvcf/test.genome.vcf.idx"
test_genome21_indels_vcf_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/vcf/test.genome_21.somatic_sv.vcf.gz"
test_genome21_indels_vcf_gz_tbi = "${test_data_dir}/genomics/homo_sapiens/illumina/vcf/test.genome_21.somatic_sv.vcf.gz.tbi"
test2_genome_vcf = "${test_data_dir}/genomics/homo_sapiens/illumina/gvcf/test2.genome.vcf"
test2_genome_vcf_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/gvcf/test2.genome.vcf.gz"
test2_genome_vcf_gz_tbi = "${test_data_dir}/genomics/homo_sapiens/illumina/gvcf/test2.genome.vcf.gz.tbi"
test2_genome_vcf_idx = "${test_data_dir}/genomics/homo_sapiens/illumina/gvcf/test2.genome.vcf.idx"
test_mpileup = "${test_data_dir}/genomics/homo_sapiens/illumina/mpileup/test.mpileup.gz"
test2_mpileup = "${test_data_dir}/genomics/homo_sapiens/illumina/mpileup/test2.mpileup.gz"
test_genome21_indels_vcf_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/vcf/test.genome_21.somatic_sv.vcf.gz"
test_genome21_indels_vcf_gz_tbi = "${test_data_dir}/genomics/homo_sapiens/illumina/vcf/test.genome_21.somatic_sv.vcf.gz.tbi"
test_broadpeak = "${test_data_dir}/genomics/homo_sapiens/illumina/broadpeak/test.broadPeak"
test2_broadpeak = "${test_data_dir}/genomics/homo_sapiens/illumina/broadpeak/test2.broadPeak"
test_mpileup = "${test_data_dir}/genomics/homo_sapiens/illumina/mpileup/test.mpileup.gz"
test2_mpileup = "${test_data_dir}/genomics/homo_sapiens/illumina/mpileup/test2.mpileup.gz"
test_narrowpeak = "${test_data_dir}/genomics/homo_sapiens/illumina/narrowpeak/test.narrowPeak"
test2_narrowpeak = "${test_data_dir}/genomics/homo_sapiens/illumina/narrowpeak/test2.narrowPeak"
test_broadpeak = "${test_data_dir}/genomics/homo_sapiens/illumina/broadpeak/test.broadPeak"
test2_broadpeak = "${test_data_dir}/genomics/homo_sapiens/illumina/broadpeak/test2.broadPeak"
test_10x_1_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/10xgenomics/test_10x_S1_L001_R1_001.fastq.gz"
test_10x_2_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/10xgenomics/test_10x_S1_L001_R2_001.fastq.gz"
test_narrowpeak = "${test_data_dir}/genomics/homo_sapiens/illumina/narrowpeak/test.narrowPeak"
test2_narrowpeak = "${test_data_dir}/genomics/homo_sapiens/illumina/narrowpeak/test2.narrowPeak"
test_yak = "${test_data_dir}/genomics/homo_sapiens/illumina/yak/test.yak"
test2_yak = "${test_data_dir}/genomics/homo_sapiens/illumina/yak/test2.yak"
test_10x_1_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/10xgenomics/test_10x_S1_L001_R1_001.fastq.gz"
test_10x_2_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/10xgenomics/test_10x_S1_L001_R2_001.fastq.gz"
cutandrun_bedgraph_test_1 = "${test_data_dir}/genomics/homo_sapiens/illumina/bedgraph/cutandtag_h3k27me3_test_1.bedGraph"
cutandrun_bedgraph_test_2 = "${test_data_dir}/genomics/homo_sapiens/illumina/bedgraph/cutandtag_igg_test_1.bedGraph"
test_yak = "${test_data_dir}/genomics/homo_sapiens/illumina/yak/test.yak"
test2_yak = "${test_data_dir}/genomics/homo_sapiens/illumina/yak/test2.yak"
test_rnaseq_vcf = "${test_data_dir}/genomics/homo_sapiens/illumina/vcf/test.rnaseq.vcf"
test_sv_vcf = "${test_data_dir}/genomics/homo_sapiens/illumina/vcf/sv_query.vcf.gz"
cutandrun_bedgraph_test_1 = "${test_data_dir}/genomics/homo_sapiens/illumina/bedgraph/cutandtag_h3k27me3_test_1.bedGraph"
cutandrun_bedgraph_test_2 = "${test_data_dir}/genomics/homo_sapiens/illumina/bedgraph/cutandtag_igg_test_1.bedGraph"
test_pytor = "${test_data_dir}/genomics/homo_sapiens/illumina/pytor/test.pytor"
test_rnaseq_vcf = "${test_data_dir}/genomics/homo_sapiens/illumina/vcf/test.rnaseq.vcf"
test_sv_vcf = "${test_data_dir}/genomics/homo_sapiens/illumina/vcf/sv_query.vcf.gz"
test_pytor = "${test_data_dir}/genomics/homo_sapiens/illumina/pytor/test.pytor"
}
'pacbio' {
primers = "${test_data_dir}/genomics/homo_sapiens/pacbio/fasta/primers.fasta"
@ -345,6 +349,7 @@ params {
genome_gbff_gz = "${test_data_dir}/genomics/prokaryotes/bacteroides_fragilis/genome/genome.gbff.gz"
genome_paf = "${test_data_dir}/genomics/prokaryotes/bacteroides_fragilis/genome/genome.paf"
genome_mapping_potential_arg = "${test_data_dir}/genomics/prokaryotes/bacteroides_fragilis/genome/genome.mapping.potential.ARG"
genome_gff_gz = "${test_data_dir}/genomics/prokaryotes/bacteroides_fragilis/genome/genome.gff.gz"
}
'illumina' {
@ -377,7 +382,7 @@ params {
test3_gff = "${test_data_dir}/genomics/prokaryotes/candidatus_portiera_aleyrodidarum/genome/gff/test3.gff"
}
'illumina' {
test_1_fastq_gz = "${test_data_dir}/genomics/prokaryotes/candidatus_portiera_aleyrodidarum/illumina/fasta/test_1.fastq.gz"
test_1_fastq_gz = "${test_data_dir}/genomics/prokaryotes/candidatus_portiera_aleyrodidarum/illumina/fastq/test_1.fastq.gz"
test_2_fastq_gz = "${test_data_dir}/genomics/prokaryotes/candidatus_portiera_aleyrodidarum/illumina/fastq/test_2.fastq.gz"
test_se_fastq_gz = "${test_data_dir}/genomics/prokaryotes/candidatus_portiera_aleyrodidarum/illumina/fastq/test_se.fastq.gz"
}
@ -421,6 +426,9 @@ params {
test_merge_cool_cp2 = "${test_data_dir}/genomics/homo_sapiens/cooler/merge/toy/toy.symm.upper.2.cp2.cool"
}
'config' {
ncbi_user_settings = "${test_data_dir}/generic/config/ncbi_user_settings.mkfg"
}
}
}
}

View file

@ -20,7 +20,7 @@ workflow test_arriba_single_end {
STAR_GENOMEGENERATE ( fasta, gtf )
STAR_ALIGN ( input, STAR_GENOMEGENERATE.out.index, gtf, star_ignore_sjdbgtf, seq_platform, seq_center )
ARRIBA ( STAR_ALIGN.out.bam, fasta, gtf )
    ARRIBA ( STAR_ALIGN.out.bam, fasta, gtf, [], [], [], [], [])
}
workflow test_arriba_paired_end {
@ -38,5 +38,5 @@ workflow test_arriba_paired_end {
STAR_GENOMEGENERATE ( fasta, gtf )
STAR_ALIGN ( input, STAR_GENOMEGENERATE.out.index, gtf, star_ignore_sjdbgtf, seq_platform, seq_center )
ARRIBA ( STAR_ALIGN.out.bam, fasta, gtf )
ARRIBA ( STAR_ALIGN.out.bam, fasta, gtf, [], [], [], [], [])
}
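
Editor's note: the five trailing [] entries in the calls above are empty placeholders for the module's new optional reference inputs. As a minimal sketch only, here is how one of those slots could be filled instead of left empty, assuming the first optional slot is the blacklist (the file path below is purely illustrative and not part of this diff):

    // illustrative only: stage a blacklist file and pass it in place of the first placeholder
    blacklist = file("arriba_blacklist.tsv.gz")   // hypothetical local path
    ARRIBA ( STAR_ALIGN.out.bam, fasta, gtf, blacklist, [], [], [], [] )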

View file

@ -4,7 +4,7 @@
- arriba
files:
- path: output/arriba/test.fusions.discarded.tsv
md5sum: cad8c215b938d1e45b747a5b7898a4c2
md5sum: 7602ab4ccbbb0c54fbca12a942877e6d
- path: output/arriba/test.fusions.tsv
md5sum: 7c3383f7eb6d79b84b0bd30a7ef02d70
- path: output/star/star/Genome
@ -39,6 +39,7 @@
- path: output/star/star/transcriptInfo.tab
md5sum: 0c3a5adb49d15e5feff81db8e29f2e36
- path: output/star/test.Aligned.out.bam
md5sum: 4fa079d11f8938e51015e3e477fa7149
- path: output/star/test.Log.final.out
- path: output/star/test.Log.out
- path: output/star/test.Log.progress.out
@ -50,7 +51,7 @@
- arriba
files:
- path: output/arriba/test.fusions.discarded.tsv
md5sum: 85e36c887464e4deaa65f45174d3b8fd
md5sum: cdc6cfbc75e68ce29a766f50f390274d
- path: output/arriba/test.fusions.tsv
md5sum: 7c3383f7eb6d79b84b0bd30a7ef02d70
- path: output/star/star/Genome

View file

@ -30,6 +30,35 @@ workflow test_bowtie2_align_paired_end {
fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
save_unaligned = false
BOWTIE2_BUILD ( fasta )
BOWTIE2_ALIGN ( input, BOWTIE2_BUILD.out.index, save_unaligned )
}
workflow test_bowtie2_align_single_end_large_index {
input = [
[ id:'test', single_end:true ], // meta map
[
file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true)
]
]
fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
save_unaligned = false
BOWTIE2_BUILD ( fasta )
BOWTIE2_ALIGN ( input, BOWTIE2_BUILD.out.index, save_unaligned )
}
workflow test_bowtie2_align_paired_end_large_index {
input = [
[ id:'test', single_end:false ], // meta map
[
file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_2_fastq_gz'], checkIfExists: true)
]
]
fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
save_unaligned = false
BOWTIE2_BUILD ( fasta )
BOWTIE2_ALIGN ( input, BOWTIE2_BUILD.out.index, save_unaligned )
}

View file

@ -5,6 +5,7 @@ params {
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}
if (params.force_large_index) {
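
Editor's note: the hunk above is cut off right after the conditional opens. For reference only, a minimal sketch of how such a force_large_index stanza is typically completed in nf-core test configs, assuming the intent is to pass bowtie2-build's --large-index flag through ext.args (this completion is an assumption, not recovered from the diff):

    if (params.force_large_index) {
        process {
            withName: BOWTIE2_BUILD {
                // assumption: force .bt2l indices by asking bowtie2-build for a large index
                ext.args = '--large-index'
            }
        }
    }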

View file

@ -1,4 +1,4 @@
- name: bowtie2 align single-end
- name: bowtie2 align test_bowtie2_align_single_end
command: nextflow run ./tests/modules/bowtie2/align -entry test_bowtie2_align_single_end -c ./tests/config/nextflow.config -c ./tests/modules/bowtie2/align/nextflow.config
tags:
- bowtie2
@ -6,78 +6,34 @@
files:
- path: ./output/bowtie2/test.bam
- path: ./output/bowtie2/test.bowtie2.log
- path: ./output/bowtie2/bowtie2/genome.3.bt2
md5sum: 4ed93abba181d8dfab2e303e33114777
- path: ./output/bowtie2/bowtie2/genome.2.bt2
md5sum: 47b153cd1319abc88dda532462651fcf
- path: ./output/bowtie2/bowtie2/genome.1.bt2
md5sum: cbe3d0bbea55bc57c99b4bfa25b5fbdf
- path: ./output/bowtie2/bowtie2/genome.4.bt2
md5sum: c25be5f8b0378abf7a58c8a880b87626
- path: ./output/bowtie2/bowtie2/genome.rev.1.bt2
md5sum: 52be6950579598a990570fbcf5372184
- path: ./output/bowtie2/bowtie2/genome.rev.2.bt2
md5sum: e3b4ef343dea4dd571642010a7d09597
- path: ./output/bowtie2/versions.yml
- name: bowtie2 align paired-end
command: nextflow run ./tests/modules/bowtie2/align -entry test_bowtie2_align_paired_end -c ./tests/config/nextflow.config -c ./tests/modules/bowtie2/align/nextflow.config
- name: bowtie2 align test_bowtie2_align_paired_end
command: nextflow run tests/modules/bowtie2/align -entry test_bowtie2_align_paired_end -c tests/config/nextflow.config -c tests/modules/bowtie2/align/nextflow.config
tags:
- bowtie2
- bowtie2/align
files:
- path: ./output/bowtie2/test.bam
- path: ./output/bowtie2/test.bowtie2.log
- path: ./output/bowtie2/bowtie2/genome.3.bt2
md5sum: 4ed93abba181d8dfab2e303e33114777
- path: ./output/bowtie2/bowtie2/genome.2.bt2
md5sum: 47b153cd1319abc88dda532462651fcf
- path: ./output/bowtie2/bowtie2/genome.1.bt2
md5sum: cbe3d0bbea55bc57c99b4bfa25b5fbdf
- path: ./output/bowtie2/bowtie2/genome.4.bt2
md5sum: c25be5f8b0378abf7a58c8a880b87626
- path: ./output/bowtie2/bowtie2/genome.rev.1.bt2
md5sum: 52be6950579598a990570fbcf5372184
- path: ./output/bowtie2/bowtie2/genome.rev.2.bt2
md5sum: e3b4ef343dea4dd571642010a7d09597
- path: ./output/bowtie2/versions.yml
- name: bowtie2 align single-end large-index
command: nextflow run ./tests/modules/bowtie2/align -entry test_bowtie2_align_single_end -c ./tests/config/nextflow.config -c ./tests/modules/bowtie2/align/nextflow.config --force_large_index
- name: bowtie2 align test_bowtie2_align_single_end_large_index
command: nextflow run tests/modules/bowtie2/align -entry test_bowtie2_align_single_end_large_index -c tests/config/nextflow.config -c tests/modules/bowtie2/align/nextflow.config --force_large_index
tags:
- bowtie2
- bowtie2/align
files:
- path: ./output/bowtie2/test.bam
- path: ./output/bowtie2/test.bowtie2.log
- path: ./output/bowtie2/bowtie2/genome.3.bt2l
md5sum: 8952b3e0b1ce9a7a5916f2e147180853
- path: ./output/bowtie2/bowtie2/genome.2.bt2l
md5sum: 22c284084784a0720989595e0c9461fd
- path: ./output/bowtie2/bowtie2/genome.1.bt2l
md5sum: 07d811cd4e350d56267183d2ac7023a5
- path: ./output/bowtie2/bowtie2/genome.4.bt2l
md5sum: c25be5f8b0378abf7a58c8a880b87626
- path: ./output/bowtie2/bowtie2/genome.rev.1.bt2l
md5sum: fda48e35925fb24d1c0785f021981e25
- path: ./output/bowtie2/bowtie2/genome.rev.2.bt2l
md5sum: 802c26d32b970e1b105032b7ce7348b4
- path: ./output/bowtie2/versions.yml
- name: bowtie2 align paired-end large-index
command: nextflow run ./tests/modules/bowtie2/align -entry test_bowtie2_align_paired_end -c ./tests/config/nextflow.config -c ./tests/modules/bowtie2/align/nextflow.config --force_large_index
- name: bowtie2 align test_bowtie2_align_paired_end_large_index
command: nextflow run tests/modules/bowtie2/align -entry test_bowtie2_align_paired_end_large_index -c tests/config/nextflow.config -c tests/modules/bowtie2/align/nextflow.config --force_large_index
tags:
- bowtie2
- bowtie2/align
files:
- path: ./output/bowtie2/test.bam
- path: ./output/bowtie2/test.bowtie2.log
- path: ./output/bowtie2/bowtie2/genome.3.bt2l
md5sum: 8952b3e0b1ce9a7a5916f2e147180853
- path: ./output/bowtie2/bowtie2/genome.2.bt2l
md5sum: 22c284084784a0720989595e0c9461fd
- path: ./output/bowtie2/bowtie2/genome.1.bt2l
md5sum: 07d811cd4e350d56267183d2ac7023a5
- path: ./output/bowtie2/bowtie2/genome.4.bt2l
md5sum: c25be5f8b0378abf7a58c8a880b87626
- path: ./output/bowtie2/bowtie2/genome.rev.1.bt2l
md5sum: fda48e35925fb24d1c0785f021981e25
- path: ./output/bowtie2/bowtie2/genome.rev.2.bt2l
md5sum: 802c26d32b970e1b105032b7ce7348b4
- path: ./output/bowtie2/versions.yml

View file

@ -0,0 +1,44 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { CUSTOM_SRATOOLSNCBISETTINGS } from '../../../../modules/custom/sratoolsncbisettings/main.nf'
workflow test_sratoolsncbisettings_with_good_existing {
file(params.settings_path).mkdirs()
def settings = file(params.test_data['generic']['config']['ncbi_user_settings'], checkIfExists: true)
settings.copyTo(params.settings_file)
CUSTOM_SRATOOLSNCBISETTINGS()
}
workflow test_sratoolsncbisettings_with_bad_existing {
file(params.settings_path).mkdirs()
def settings = file(params.settings_file)
settings.text = '''
## auto-generated configuration file - DO NOT EDIT ##
config/default = "false"
/repository/remote/main/CGI/resolver-cgi = "https://trace.ncbi.nlm.nih.gov/Traces/names/names.fcgi"
/repository/remote/protected/CGI/resolver-cgi = "https://trace.ncbi.nlm.nih.gov/Traces/names/names.fcgi"
/repository/user/ad/public/apps/file/volumes/flatAd = "."
/repository/user/ad/public/apps/refseq/volumes/refseqAd = "."
/repository/user/ad/public/apps/sra/volumes/sraAd = "."
/repository/user/ad/public/apps/sraPileup/volumes/ad = "."
/repository/user/ad/public/apps/sraRealign/volumes/ad = "."
/repository/user/ad/public/apps/wgs/volumes/wgsAd = "."
/repository/user/ad/public/root = "."
/repository/user/default-path = "/root/ncbi"
'''.stripIndent()
CUSTOM_SRATOOLSNCBISETTINGS()
}
workflow test_sratoolsncbisettings_with_nonexisting {
def settings = file(params.settings_file)
settings.delete()
CUSTOM_SRATOOLSNCBISETTINGS()
}

View file

@ -0,0 +1,8 @@
params.settings_path = '/tmp/.ncbi'
params.settings_file = "${params.settings_path}/user-settings.mkfg"
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}

View file

@ -0,0 +1,17 @@
params.settings_path = '/tmp/.ncbi'
params.settings_file = "${params.settings_path}/user-settings.mkfg"
env.NCBI_SETTINGS = params.settings_file
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
withName: CUSTOM_SRATOOLSNCBISETTINGS {
containerOptions = {
(workflow.containerEngine == 'singularity') ?
"-B ${params.settings_path}:${params.settings_path}" :
"-v ${params.settings_path}:${params.settings_path}"
}
}
}
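
Editor's note: the containerOptions closure above bind-mounts params.settings_path so the settings file staged on the host is visible at the same path inside the container (-B for Singularity, -v for Docker), while env.NCBI_SETTINGS tells sra-tools where to look for it. A small sketch of the same pattern extended to another engine, assuming a podman runtime that accepts -v like Docker (the podman branch is an assumption, not part of this diff):

    containerOptions = {
        switch (workflow.containerEngine) {
            case 'singularity': return "-B ${params.settings_path}:${params.settings_path}"
            case 'docker':
            case 'podman':      return "-v ${params.settings_path}:${params.settings_path}"   // assumption
            default:            return ''
        }
    }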

View file

@ -0,0 +1,44 @@
- name: "custom sratoolsncbisettings test_sratoolsncbisettings_with_good_existing"
command: nextflow run ./tests/modules/custom/sratoolsncbisettings -entry test_sratoolsncbisettings_with_good_existing -c ./tests/config/nextflow.config -c ./tests/modules/custom/sratoolsncbisettings/nextflow_mount.config
tags:
- "custom"
- "custom/sratoolsncbisettings"
files:
- path: "output/custom/user-settings.mkfg"
md5sum: 955e27aff2c277c2f1f0943a098888c1
- path: output/custom/versions.yml
contains:
- "sratools: 2.11.0"
- name: "custom sratoolsncbisettings test_sratoolsncbisettings_with_bad_existing"
command: nextflow run ./tests/modules/custom/sratoolsncbisettings -entry test_sratoolsncbisettings_with_bad_existing -c ./tests/config/nextflow.config -c ./tests/modules/custom/sratoolsncbisettings/nextflow_mount.config
tags:
- "custom"
- "custom/sratoolsncbisettings"
exit_code: 1
stdout:
contains:
- "Command error:"
- "missing the required entries"
- "/LIBS/GUID"
- "/libs/cloud/report_instance_identity"
- "Feel free to add the following"
files:
- path: "output/custom/user-settings.mkfg"
should_exist: false
- path: output/custom/versions.yml
should_exist: false
- name: "custom sratoolsncbisettings test_sratoolsncbisettings_with_nonexisting"
command: nextflow run ./tests/modules/custom/sratoolsncbisettings -entry test_sratoolsncbisettings_with_nonexisting -c ./tests/config/nextflow.config -c ./tests/modules/custom/sratoolsncbisettings/nextflow.config
tags:
- "custom"
- "custom/sratoolsncbisettings"
files:
- path: "output/custom/user-settings.mkfg"
contains:
- "/LIBS/GUID"
- "/libs/cloud/report_instance_identity"
- path: output/custom/versions.yml
contains:
- "sratools: 2.11.0"

View file

@ -17,11 +17,11 @@ workflow test_gatk4_mergebamalignment {
workflow test_gatk4_mergebamalignment_stubs {
input = [ [ id:'test' ], // meta map
"test_foo.bam",
"test_bar.bam"
file(params.test_data['sarscov2']['illumina']['test_single_end_bam'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_unaligned_bam'], checkIfExists: true)
]
fasta = "genome.fasta"
dict = "genome.fasta.dict"
fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
dict = file(params.test_data['sarscov2']['genome']['genome_dict'], checkIfExists: true)
GATK4_MERGEBAMALIGNMENT ( input, fasta, dict )
}

View file

@ -9,7 +9,7 @@
- path: output/gatk4/versions.yml
- name: gatk4 mergebamalignment test_gatk4_mergebamalignment_stubs
command: nextflow run ./tests/modules/gatk4/mergebamalignment -entry test_gatk4_mergebamalignment -c ./tests/config/nextflow.config -c ./tests/modules/gatk4/mergebamalignment/nextflow.config -stub-run
command: nextflow run ./tests/modules/gatk4/mergebamalignment -entry test_gatk4_mergebamalignment_stubs -c ./tests/config/nextflow.config -c ./tests/modules/gatk4/mergebamalignment/nextflow.config -stub-run
tags:
- gatk4
- gatk4/mergebamalignment

View file

@ -120,23 +120,23 @@ workflow test_gatk4_mutect2_mitochondria {
}
workflow test_gatk4_mutect2_tumor_normal_pair_f1r2_stubs {
input = [ [ id:'test', normal_id:'normal', tumor_id:'tumour' ], // meta map
[ "foo_paired.bam",
"foo_paired2.bam"
input = [ [ id:'test', normal_id:'normal', tumor_id:'tumour' ], // meta map
[ file(params.test_data['homo_sapiens']['illumina']['test_paired_end_recalibrated_sorted_bam'], checkIfExists: true),
file(params.test_data['homo_sapiens']['illumina']['test2_paired_end_recalibrated_sorted_bam'], checkIfExists: true)
],
[ "foo_paired.bam.bai",
"foo_paired2.bam.bai"
[ file(params.test_data['homo_sapiens']['illumina']['test_paired_end_recalibrated_sorted_bam_bai'], checkIfExists: true),
file(params.test_data['homo_sapiens']['illumina']['test2_paired_end_recalibrated_sorted_bam_bai'], checkIfExists: true)
],
[]
]
fasta = "genome.fasta"
fai = "genome.fasta.fai"
dict = "genome.fasta.dict"
germline_resource = "genome_gnomAD.r2.1.1.vcf.gz"
germline_resource_tbi = "genome_gnomAD.r2.1.1.vcf.gz.tbi"
panel_of_normals = "genome_mills_and_1000G.indels.hg38.vcf.gz"
panel_of_normals_tbi = "genome_mills_and_1000G.indels.hg38.vcf.gz.tbi"
fasta = file(params.test_data['homo_sapiens']['genome']['genome_21_fasta'], checkIfExists: true)
fai = file(params.test_data['homo_sapiens']['genome']['genome_21_fasta_fai'], checkIfExists: true)
dict = file(params.test_data['homo_sapiens']['genome']['genome_21_dict'], checkIfExists: true)
germline_resource = file(params.test_data['homo_sapiens']['genome']['gnomad_r2_1_1_21_vcf_gz'], checkIfExists: true)
germline_resource_tbi = file(params.test_data['homo_sapiens']['genome']['gnomad_r2_1_1_21_vcf_gz_tbi'], checkIfExists: true)
panel_of_normals = file(params.test_data['homo_sapiens']['genome']['mills_and_1000g_indels_21_vcf_gz'], checkIfExists: true)
panel_of_normals_tbi = file(params.test_data['homo_sapiens']['genome']['mills_and_1000g_indels_21_vcf_gz_tbi'], checkIfExists: true)
GATK4_MUTECT2_F1R2 ( input, fasta, fai, dict, germline_resource, germline_resource_tbi, panel_of_normals, panel_of_normals_tbi )
}

View file

@ -71,7 +71,7 @@
- path: output/gatk4/versions.yml
- name: gatk4 mutect2 test_gatk4_mutect2_tumor_normal_pair_f1r2_stubs
command: nextflow run ./tests/modules/gatk4/mutect2 -entry test_gatk4_mutect2_tumor_normal_pair_f1r2 -c ./tests/config/nextflow.config -c ./tests/modules/gatk4/mutect2/nextflow.config -stub-run
command: nextflow run ./tests/modules/gatk4/mutect2 -entry test_gatk4_mutect2_tumor_normal_pair_f1r2_stubs -c ./tests/config/nextflow.config -c ./tests/modules/gatk4/mutect2/nextflow.config -stub-run
tags:
- gatk4
- gatk4/mutect2

View file

@ -14,7 +14,7 @@ workflow test_gatk4_revertsam {
workflow test_gatk4_revertsam_stubs {
input = [ [ id:'test' ], // meta map
"foo_paired_end.bam"
file(params.test_data['sarscov2']['illumina']['test_paired_end_bam'], checkIfExists: true)
]
GATK4_REVERTSAM ( input )

View file

@ -9,7 +9,7 @@
- path: output/gatk4/versions.yml
- name: gatk4 revertsam test_gatk4_revertsam_stubs
command: nextflow run ./tests/modules/gatk4/revertsam -entry test_gatk4_revertsam -c ./tests/config/nextflow.config -c ./tests/modules/gatk4/revertsam/nextflow.config -stub-run
command: nextflow run ./tests/modules/gatk4/revertsam -entry test_gatk4_revertsam_stubs -c ./tests/config/nextflow.config -c ./tests/modules/gatk4/revertsam/nextflow.config -stub-run
tags:
- gatk4
- gatk4/revertsam

View file

@ -21,8 +21,8 @@ workflow test_gatk4_samtofastq_paired_end {
}
workflow test_gatk4_samtofastq_paired_end_stubs {
input = [ [ id:'test', single_end: false ], // meta map
[ "foo_paired_end.bam" ]
input = [ [ id:'test', single_end: true ], // meta map
[ file(params.test_data['sarscov2']['illumina']['test_single_end_bam'], checkIfExists: true) ]
]
GATK4_SAMTOFASTQ ( input )

View file

@ -21,7 +21,7 @@
- path: output/gatk4/versions.yml
- name: gatk4 samtofastq test_gatk4_samtofastq_paired_end_stubs
command: nextflow run ./tests/modules/gatk4/samtofastq -entry test_gatk4_samtofastq_paired_end -c ./tests/config/nextflow.config -c ./tests/modules/gatk4/samtofastq/nextflow.config -stub-run
command: nextflow run ./tests/modules/gatk4/samtofastq -entry test_gatk4_samtofastq_paired_end_stubs -c ./tests/config/nextflow.config -c ./tests/modules/gatk4/samtofastq/nextflow.config -stub-run
tags:
- gatk4
- gatk4/samtofastq

View file

@ -0,0 +1,33 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { GATK4_SPLITINTERVALS } from '../../../../modules/gatk4/splitintervals/main.nf'
workflow test_gatk4_splitintervals_bed {
input = [
[ id:'test' ], // meta map
file(params.test_data['homo_sapiens']['genome']['genome_multi_interval_bed'], checkIfExists: true)
]
fasta = file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true)
fasta_fai = file(params.test_data['homo_sapiens']['genome']['genome_fasta_fai'], checkIfExists: true)
fasta_dict = file(params.test_data['homo_sapiens']['genome']['genome_dict'], checkIfExists: true)
GATK4_SPLITINTERVALS ( input, fasta, fasta_fai, fasta_dict)
}
workflow test_gatk4_splitintervals_intervals {
input = [
[ id:'test' ], // meta map
file(params.test_data['homo_sapiens']['genome']['genome_interval_list'], checkIfExists: true)
]
fasta = file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true)
fasta_fai = file(params.test_data['homo_sapiens']['genome']['genome_fasta_fai'], checkIfExists: true)
fasta_dict = file(params.test_data['homo_sapiens']['genome']['genome_dict'], checkIfExists: true)
GATK4_SPLITINTERVALS ( input, fasta, fasta_fai, fasta_dict)
}

View file

@ -0,0 +1,9 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
withName: GATK4_SPLITINTERVALS {
ext.args = "--scatter-count 2"
}
}
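
Editor's note: --scatter-count 2 is what makes SplitIntervals emit exactly two interval lists, matching the 0000- and 0001-scattered.interval_list paths checked in the test.yml below. A minimal sketch of parameterising the scatter width, with scatter_count as a hypothetical param that is not part of this diff:

    params.scatter_count = 2

    process {
        withName: GATK4_SPLITINTERVALS {
            // evaluated lazily so the param can still be overridden on the command line
            ext.args = { "--scatter-count ${params.scatter_count}" }
        }
    }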

View file

@ -0,0 +1,23 @@
- name: gatk4 splitintervals test_gatk4_splitintervals_bed
command: nextflow run tests/modules/gatk4/splitintervals -entry test_gatk4_splitintervals_bed -c tests/config/nextflow.config
tags:
- gatk4/splitintervals
- gatk4
files:
- path: output/gatk4/test/0000-scattered.interval_list
md5sum: c8d6b19e7a92535b6ce9608eae558faa
- path: output/gatk4/test/0001-scattered.interval_list
md5sum: b1877ad96aec308906594c50ebbe3ded
- path: output/gatk4/versions.yml
- name: gatk4 splitintervals test_gatk4_splitintervals_intervals
command: nextflow run tests/modules/gatk4/splitintervals -entry test_gatk4_splitintervals_intervals -c tests/config/nextflow.config
tags:
- gatk4/splitintervals
- gatk4
files:
- path: output/gatk4/test/0000-scattered.interval_list
md5sum: ebd6b34a335efc6732ff541936c6d2d5
- path: output/gatk4/test/0001-scattered.interval_list
md5sum: 9459b0e124fa84ec1e64ac4615bc9af7
- path: output/gatk4/versions.yml

View file

@ -0,0 +1,19 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { MERYL_COUNT } from '../../../modules/meryl/count/main.nf'
include { MERYL_HISTOGRAM } from '../../../modules/meryl/histogram/main.nf'
include { GENOMESCOPE2 } from '../../../modules/genomescope2/main.nf'
workflow test_genomescope2 {
input = [
[ id:'test', single_end:false ], // meta map
file(params.test_data['bacteroides_fragilis']['illumina']['test1_1_fastq_gz'], checkIfExists: true)
]
MERYL_COUNT ( input )
MERYL_HISTOGRAM ( MERYL_COUNT.out.meryl_db )
GENOMESCOPE2 ( MERYL_HISTOGRAM.out.hist )
}

View file

@ -0,0 +1,13 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
withName: 'MERYL.*' {
ext.args = 'k=21'
}
withName: 'GENOMESCOPE2' {
ext.args = '-k 21 -p 1'
}
}
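
Editor's note: the k-mer size has to agree between the meryl counting step (k=21) and GenomeScope2 (-k 21), since the model is fitted to a histogram built at that k; -p 1 fixes the assumed ploidy. A small sketch of keeping the two in sync through a single parameter, with kmer_size as a hypothetical param that is not part of this diff:

    params.kmer_size = 21

    process {
        withName: 'MERYL.*' {
            ext.args = { "k=${params.kmer_size}" }
        }
        withName: 'GENOMESCOPE2' {
            ext.args = { "-k ${params.kmer_size} -p 1" }
        }
    }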

View file

@ -0,0 +1,22 @@
- name: genomescope2 test_genomescope2
command: nextflow run tests/modules/genomescope2 -entry test_genomescope2 -c tests/config/nextflow.config
tags:
- genomescope2
files:
- path: output/genomescope2/test_linear_plot.png
md5sum: 94c165c5028156299a1d4d05766cac51
- path: output/genomescope2/test_log_plot.png
md5sum: 9d25ca463d92a0c73a893da7fd3979ba
- path: output/genomescope2/test_model.txt
md5sum: 3caf62f715f64a2f2b8fdff5d079cb84
- path: output/genomescope2/test_summary.txt
md5sum: 7452860e2cea99b85f3ff60daeac77f5
- path: output/genomescope2/test_transformed_linear_plot.png
md5sum: 99a64c1c18d8670f64cb863d4334abbb
- path: output/genomescope2/test_transformed_log_plot.png
md5sum: b4e029c9fb9987ca33b17392a691c1b4
- path: output/genomescope2/versions.yml
md5sum: 18afeb26f62a47f680b2bb3e27da9cbc
- path: output/meryl/test.hist
md5sum: f75362ab9cd70d96621b3690e952085f
- path: output/meryl/versions.yml

View file

@ -0,0 +1,28 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { MERYL_COUNT } from '../../../../modules/meryl/count/main.nf'
workflow test_meryl_count_single_end {
input = [
[ id:'test' , single_end: true ], // meta map
file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true)
]
MERYL_COUNT ( input )
}
workflow test_meryl_count_paired_end {
input = [
[ id:'test' , single_end: false ], // meta map
[
file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_2_fastq_gz'], checkIfExists: true)
]
]
MERYL_COUNT ( input )
}

View file

@ -0,0 +1,6 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
ext.args = 'k=21'
}

View file

@ -0,0 +1,17 @@
- name: meryl count test_meryl_count_single_end
command: nextflow run tests/modules/meryl/count -entry test_meryl_count_single_end -c tests/config/nextflow.config
tags:
- meryl/count
- meryl
files:
- path: output/meryl/versions.yml
md5sum: 5fe537d873925ccbcc4edf0983e9eda0
- name: meryl count test_meryl_count_paired_end
command: nextflow run tests/modules/meryl/count -entry test_meryl_count_paired_end -c tests/config/nextflow.config
tags:
- meryl/count
- meryl
files:
- path: output/meryl/versions.yml
md5sum: 4961f13cfb60ba8764ed666e70dbf12c

View file

@ -0,0 +1,17 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { MERYL_COUNT } from '../../../../modules/meryl/count/main.nf'
include { MERYL_HISTOGRAM } from '../../../../modules/meryl/histogram/main.nf'
workflow test_meryl_histogram {
input = [
[ id:'test' ], // meta map
file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true)
]
MERYL_COUNT ( input )
MERYL_HISTOGRAM ( MERYL_COUNT.out.meryl_db )
}

View file

@ -0,0 +1,6 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
ext.args = 'k=21'
}

View file

@ -0,0 +1,10 @@
- name: meryl histogram test_meryl_histogram
command: nextflow run tests/modules/meryl/histogram -entry test_meryl_histogram -c tests/config/nextflow.config
tags:
- meryl/histogram
- meryl
files:
- path: output/meryl/test.hist
md5sum: 4bfdc8b287ee0cfd9922bbfa8cd64650
- path: output/meryl/versions.yml
md5sum: 050038f1b1df79977a393cce1b4b2ddb

View file

@ -0,0 +1,31 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { MERYL_COUNT } from '../../../../modules/meryl/count/main.nf'
include { MERYL_UNIONSUM } from '../../../../modules/meryl/unionsum/main.nf'
workflow test_meryl_unionsum_single_end {
input = [
[ id:'test', single_end: true ], // meta map
file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true)
]
MERYL_COUNT ( input )
MERYL_UNIONSUM ( MERYL_COUNT.out.meryl_db )
}
workflow test_meryl_unionsum_paired_end {
input = [
[ id:'test', single_end: false ], // meta map
[
file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_2_fastq_gz'], checkIfExists: true)
]
]
MERYL_COUNT ( input )
MERYL_UNIONSUM ( MERYL_COUNT.out.meryl_db )
}

View file

@ -0,0 +1,6 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
ext.args = 'k=21'
}

View file

@ -0,0 +1,17 @@
- name: meryl unionsum test_meryl_unionsum_single_end
command: nextflow run tests/modules/meryl/unionsum -entry test_meryl_unionsum_single_end -c tests/config/nextflow.config
tags:
- meryl
- meryl/unionsum
files:
- path: output/meryl/versions.yml
md5sum: 7de859c6d3a29d72f6c9c976609d0913
- name: meryl unionsum test_meryl_unionsum_paired_end
command: nextflow run tests/modules/meryl/unionsum -entry test_meryl_unionsum_paired_end -c tests/config/nextflow.config
tags:
- meryl
- meryl/unionsum
files:
- path: output/meryl/versions.yml
md5sum: a16decdec014ccb9bdab69a4a1d30818

View file

@ -0,0 +1,69 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { RTGTOOLS_VCFEVAL } from '../../../../modules/rtgtools/vcfeval/main.nf'
include { UNTAR } from '../../../../modules/untar/main.nf'
workflow test_rtgtools_vcfeval {
input = [
[ id:'test' ], // meta map
file(params.test_data['homo_sapiens']['illumina']['test2_haplotc_vcf_gz'], checkIfExists: true),
file(params.test_data['homo_sapiens']['illumina']['test2_haplotc_vcf_gz_tbi'], checkIfExists: true),
]
truth = [
file(params.test_data['homo_sapiens']['illumina']['test2_haplotc_ann_vcf_gz'], checkIfExists: true),
file(params.test_data['homo_sapiens']['illumina']['test2_haplotc_ann_vcf_gz_tbi'], checkIfExists: true)
]
truth_regions = file(params.test_data['homo_sapiens']['genome']['genome_21_multi_interval_bed'], checkIfExists: true)
evaluation_regions = file(params.test_data['homo_sapiens']['genome']['genome_bed'], checkIfExists: true)
compressed_sdf = [
[],
file(params.test_data['homo_sapiens']['genome']['genome_21_sdf'])
]
sdf = UNTAR( compressed_sdf ).untar
.map({
meta, folder ->
folder
})
RTGTOOLS_VCFEVAL ( input, truth, truth_regions, evaluation_regions, sdf )
}
workflow test_rtgtools_vcfeval_no_optional_inputs {
input = [
[ id:'test' ], // meta map
file(params.test_data['homo_sapiens']['illumina']['test2_haplotc_vcf_gz'], checkIfExists: true),
[],
]
truth = [
file(params.test_data['homo_sapiens']['illumina']['test2_haplotc_ann_vcf_gz'], checkIfExists: true),
[]
]
truth_regions = []
evaluation_regions = []
compressed_sdf = [
[],
file(params.test_data['homo_sapiens']['genome']['genome_21_sdf'])
]
sdf = UNTAR( compressed_sdf ).untar
.map({
meta, folder ->
[folder]
})
RTGTOOLS_VCFEVAL ( input, truth, truth_regions, evaluation_regions, sdf )
}

View file

@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}

View file

@ -0,0 +1,75 @@
- name: rtgtools vcfeval test_rtgtools_vcfeval
command: nextflow run tests/modules/rtgtools/vcfeval -entry test_rtgtools_vcfeval -c tests/config/nextflow.config
tags:
- rtgtools
- rtgtools/vcfeval
files:
- path: output/rtgtools/test_results/done
- path: output/rtgtools/test_results/fn.vcf.gz
md5sum: be9c9106055bfad4c5985bc0d33efd56
- path: output/rtgtools/test_results/fn.vcf.gz.tbi
md5sum: 092a7a3162e7cff25d273525751eb284
- path: output/rtgtools/test_results/fp.vcf.gz
md5sum: e0f0ff841dc63e9fb61fd3a5db137ced
- path: output/rtgtools/test_results/fp.vcf.gz.tbi
md5sum: 092a7a3162e7cff25d273525751eb284
- path: output/rtgtools/test_results/non_snp_roc.tsv.gz
md5sum: ad5bad32c48f05aef232e2c0e708877a
- path: output/rtgtools/test_results/phasing.txt
md5sum: 133677dbd8be657439ea2b03fdfb8795
- path: output/rtgtools/test_results/progress
- path: output/rtgtools/test_results/snp_roc.tsv.gz
md5sum: 6785b83d66486e7e6c75c5a5b1574c09
- path: output/rtgtools/test_results/summary.txt
md5sum: f4c8df93c8bdab603036bbc27b4a28c3
- path: output/rtgtools/test_results/tp-baseline.vcf.gz
md5sum: be9c9106055bfad4c5985bc0d33efd56
- path: output/rtgtools/test_results/tp-baseline.vcf.gz.tbi
md5sum: 092a7a3162e7cff25d273525751eb284
- path: output/rtgtools/test_results/tp.vcf.gz
md5sum: e0f0ff841dc63e9fb61fd3a5db137ced
- path: output/rtgtools/test_results/tp.vcf.gz.tbi
md5sum: 092a7a3162e7cff25d273525751eb284
- path: output/rtgtools/test_results/vcfeval.log
- path: output/rtgtools/test_results/weighted_roc.tsv.gz
md5sum: fa7c046ea0084172f1ef91f19de07b2b
- path: output/rtgtools/versions.yml
md5sum: 270ed7a5a8e347b251eb4aa2198f98e8
- name: rtgtools vcfeval test_rtgtools_vcfeval_no_optional_inputs
command: nextflow run tests/modules/rtgtools/vcfeval -entry test_rtgtools_vcfeval_no_optional_inputs -c tests/config/nextflow.config
tags:
- rtgtools
- rtgtools/vcfeval
files:
- path: output/rtgtools/test_results/done
- path: output/rtgtools/test_results/fn.vcf.gz
md5sum: c11c889a4f42c8ea325748bd768ea34d
- path: output/rtgtools/test_results/fn.vcf.gz.tbi
md5sum: 092a7a3162e7cff25d273525751eb284
- path: output/rtgtools/test_results/fp.vcf.gz
md5sum: 138e85c1cd79f8fea9a33e81ce0c734c
- path: output/rtgtools/test_results/fp.vcf.gz.tbi
md5sum: 092a7a3162e7cff25d273525751eb284
- path: output/rtgtools/test_results/non_snp_roc.tsv.gz
md5sum: 34fb78a008dfc0bef02807b8a7012b07
- path: output/rtgtools/test_results/phasing.txt
md5sum: 133677dbd8be657439ea2b03fdfb8795
- path: output/rtgtools/test_results/progress
- path: output/rtgtools/test_results/snp_roc.tsv.gz
md5sum: a4c5761c2653e2d04fc84c1cea13b1f0
- path: output/rtgtools/test_results/summary.txt
md5sum: f33feb32f84958fb931063044fba369b
- path: output/rtgtools/test_results/tp-baseline.vcf.gz
md5sum: d1c2d990899edf127ea5fcca8866fcb0
- path: output/rtgtools/test_results/tp-baseline.vcf.gz.tbi
md5sum: 3307008fea47adb75c46d395c5567bc0
- path: output/rtgtools/test_results/tp.vcf.gz
md5sum: e35b4dab82894eee9b77c81f9bc89cca
- path: output/rtgtools/test_results/tp.vcf.gz.tbi
md5sum: 45d8f8793140944f129e728299918c88
- path: output/rtgtools/test_results/vcfeval.log
- path: output/rtgtools/test_results/weighted_roc.tsv.gz
md5sum: 5b8efc9e9381f604880412800f58e4e9
- path: output/rtgtools/versions.yml
md5sum: 55568e4bbe5ab7e634a1f392abb89cc4

View file

@ -1,17 +0,0 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { SAMTOOLS_BAMTOCRAM } from '../../../../modules/samtools/bamtocram/main.nf'
workflow test_samtools_bamtocram {
input = [ [ id:'test', single_end:false ], // meta map
file(params.test_data['sarscov2']['illumina']['test_paired_end_sorted_bam'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_paired_end_sorted_bam_bai'], checkIfExists: true)]
fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
fai = file(params.test_data['sarscov2']['genome']['genome_fasta_fai'], checkIfExists: true)
SAMTOOLS_BAMTOCRAM ( input, fasta, fai )
}

View file

@ -1,9 +0,0 @@
- name: samtools bamtocram test_samtools_bamtocram
command: nextflow run ./tests/modules/samtools/bamtocram -entry test_samtools_bamtocram -c ./tests/config/nextflow.config -c ./tests/modules/samtools/bamtocram/nextflow.config
tags:
- samtools/bamtocram
- samtools
files:
- path: output/samtools/test.cram
- path: output/samtools/test.cram.crai
- path: output/samtools/versions.yml

View file

@ -0,0 +1,31 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { SAMTOOLS_CONVERT as SAMTOOLS_BAMTOCRAM } from '../../../../modules/samtools/convert/main.nf'
include { SAMTOOLS_CONVERT as SAMTOOLS_CRAMTOBAM } from '../../../../modules/samtools/convert/main.nf'
workflow test_samtools_convert_bamtocram {
input = [ [ id:'test', single_end:false ], // meta map
file(params.test_data['sarscov2']['illumina']['test_paired_end_sorted_bam'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_paired_end_sorted_bam_bai'], checkIfExists: true)]
fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
fai = file(params.test_data['sarscov2']['genome']['genome_fasta_fai'], checkIfExists: true)
SAMTOOLS_BAMTOCRAM ( input, fasta, fai )
}
workflow test_samtools_convert_cramtobam {
input = [ [ id:'test', single_end:false ], // meta map
file(params.test_data['homo_sapiens']['illumina']['test_paired_end_recalibrated_sorted_cram'], checkIfExists: true),
file(params.test_data['homo_sapiens']['illumina']['test_paired_end_recalibrated_sorted_cram_crai'], checkIfExists: true)
]
fasta = file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true)
fai = file(params.test_data['homo_sapiens']['genome']['genome_fasta_fai'], checkIfExists: true)
SAMTOOLS_CRAMTOBAM ( input, fasta, fai )
}

View file

@ -0,0 +1,12 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
    withName: SAMTOOLS_BAMTOCRAM {
        ext.args = "-C"
    }
    withName: SAMTOOLS_CRAMTOBAM {
        ext.args = "-b"
    }
}

View file

@ -0,0 +1,21 @@
- name: samtools convert test_samtools_convert_bamtocram
command: nextflow run tests/modules/samtools/convert -entry test_samtools_convert_bamtocram -c tests/config/nextflow.config
tags:
- samtools
- samtools/convert
files:
- path: output/samtools/test.cram
- path: output/samtools/test.cram.crai
- path: output/samtools/versions.yml
- name: samtools convert test_samtools_convert_cramtobam
command: nextflow run tests/modules/samtools/convert -entry test_samtools_convert_cramtobam -c tests/config/nextflow.config
tags:
- samtools
- samtools/convert
files:
- path: output/samtools/test.bam
md5sum: c262b6dc15f9b480bdb47d6d018b4b56
- path: output/samtools/test.bam.bai
md5sum: 6e8f5034f728401bfa841c8e70c62463
- path: output/samtools/versions.yml

View file

@ -25,7 +25,7 @@ workflow test_samtools_view_cram {
workflow test_samtools_view_stubs {
input = [ [ id:'test', single_end:false ], // meta map
"foo_paired_end.bam",
file(params.test_data['sarscov2']['illumina']['test_paired_end_bam'], checkIfExists: true),
[]
]

View file

@ -16,7 +16,7 @@
- path: output/samtools/test.cram
- name: samtools view test_samtools_view_stubs
command: nextflow run ./tests/modules/samtools/view -entry test_samtools_view -c ./tests/config/nextflow.config -c ./tests/modules/samtools/view/nextflow.config -stub-run
command: nextflow run ./tests/modules/samtools/view -entry test_samtools_view_stubs -c ./tests/config/nextflow.config -c ./tests/modules/samtools/view/nextflow.config -stub-run
tags:
- samtools/view
- samtools

View file

@ -13,7 +13,7 @@ workflow test_sratools_fasterqdump_single_end {
def input = Channel.of([ id:'test_single_end', single_end:true ])
.combine(UNTAR.out.untar.map{ it[1] })
SRATOOLS_FASTERQDUMP ( input )
SRATOOLS_FASTERQDUMP(input, file(params.test_data['generic']['config']['ncbi_user_settings'], checkIfExists: true))
}
workflow test_sratools_fasterqdump_paired_end {
@ -24,5 +24,5 @@ workflow test_sratools_fasterqdump_paired_end {
def input = Channel.of([ id:'test_paired_end', single_end:false ])
.combine(UNTAR.out.untar.map{ it[1] })
SRATOOLS_FASTERQDUMP ( input )
SRATOOLS_FASTERQDUMP(input, file(params.test_data['generic']['config']['ncbi_user_settings'], checkIfExists: true))
}

View file

@ -8,6 +8,9 @@
md5sum: 1054c7b71884acdb5eed8a378f18be82
- path: output/untar/SRR13255544/SRR13255544.sra
md5sum: 466d05dafb2eec672150754168010b4d
- path: output/sratools/versions.yml
contains:
- "sratools: 2.11.0"
- name: sratools fasterqdump test_sratools_fasterqdump_paired_end
command: nextflow run ./tests/modules/sratools/fasterqdump -entry test_sratools_fasterqdump_paired_end -c ./tests/config/nextflow.config -c ./tests/modules/sratools/fasterqdump/nextflow.config
@ -21,3 +24,6 @@
md5sum: 3e3b3af3413f50a1685fd7b3f1456d4e
- path: output/untar/SRR11140744/SRR11140744.sra
md5sum: 065666caf5b2d5dfb0cb25d5f3abe659
- path: output/sratools/versions.yml
contains:
- "sratools: 2.11.0"

View file

@ -8,8 +8,8 @@ workflow test_sratools_prefetch {
input = [
[ id:'test', single_end:false ], // meta map
'ERR2815334'
'DRR000774'
]
SRATOOLS_PREFETCH ( input )
SRATOOLS_PREFETCH(input, file(params.test_data['generic']['config']['ncbi_user_settings'], checkIfExists: true))
}

View file

@ -4,5 +4,8 @@
- sratools/prefetch
- sratools
files:
- path: output/sratools/ERR2815334/ERR2815334.sra
md5sum: 9a98c7f6f4774b7ef94aa915b92a54ea
- path: output/sratools/DRR000774/DRR000774.sra
md5sum: 7647dba20c89c0e3d7ad13842f060eb0
- path: output/sratools/versions.yml
contains:
- "sratools: 2.11.0"

View file

@ -1,23 +0,0 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { SRA_FASTQ } from '../../../../subworkflows/nf-core/sra_fastq/main.nf' addParams( [:] )
workflow test_sra_fastq_single_end {
input = [
[ id:'test_single_end', single_end:true ], // meta map
'SRR13255544'
]
SRA_FASTQ ( input )
}
workflow test_sra_fastq_paired_end {
input = [
[ id:'test_paired_end', single_end:false ], // meta map
'SRR11140744'
]
SRA_FASTQ ( input )
}

View file

@ -1,27 +0,0 @@
- name: sra fastq single-end
command: nextflow run ./tests/subworkflows/nf-core/sra_fastq -entry test_sra_fastq_single_end -c tests/config/nextflow.config
tags:
- subworkflows
# - subworkflows/sra_fastq
# Modules
# - sratools
# - sratools/prefetch
# - sratools/fasterqdump
files:
- path: output/sratools/SRR13255544.fastq.gz
md5sum: 1054c7b71884acdb5eed8a378f18be82
- name: sra fastq paired-end
command: nextflow run ./tests/subworkflows/nf-core/sra_fastq -entry test_sra_fastq_paired_end -c tests/config/nextflow.config
tags:
- subworkflows
# - subworkflows/sra_fastq
# Modules
# - sratools
# - sratools/prefetch
# - sratools/fasterqdump
files:
- path: output/sratools/SRR11140744_1.fastq.gz
md5sum: 193809c784a4ea132ab2a253fa4f55b6
- path: output/sratools/SRR11140744_2.fastq.gz
md5sum: 3e3b3af3413f50a1685fd7b3f1456d4e

View file

@ -0,0 +1,29 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { SRAFASTQ } from '../../../../subworkflows/nf-core/srafastq/main.nf'
workflow test_srafastq_single_end {
input = Channel.of(
[
[ id:'test_single_end1', single_end:true ], // meta map
'DRR000774'
],
[
[ id:'test_single_end2', single_end:true ], // meta map
'DRR000775'
]
)
SRAFASTQ ( input )
}
workflow test_srafastq_paired_end {
input = [
[ id:'test_paired_end', single_end:false ], // meta map
'SRR11140744'
]
SRAFASTQ ( input )
}

View file

@ -0,0 +1,29 @@
- name: srafastq single-end
command: nextflow run ./tests/subworkflows/nf-core/srafastq -entry test_srafastq_single_end -c tests/config/nextflow.config -c tests/subworkflows/nf-core/srafastq/nextflow.config
tags:
- subworkflows
# - subworkflows/srafastq
# Modules
# - sratools
# - sratools/prefetch
# - sratools/fasterqdump
files:
- path: output/sratools/DRR000774.fastq.gz
md5sum: 19029a1132115b55277a0d79ee089b49
- path: output/sratools/DRR000775.fastq.gz
md5sum: 59ff24c86ecb260752668c059c2a1eaf
- name: srafastq paired-end
command: nextflow run ./tests/subworkflows/nf-core/srafastq -entry test_srafastq_paired_end -c tests/config/nextflow.config -c tests/subworkflows/nf-core/srafastq/nextflow.config
tags:
- subworkflows
# - subworkflows/srafastq
# Modules
# - sratools
# - sratools/prefetch
# - sratools/fasterqdump
files:
- path: output/sratools/SRR11140744_1.fastq.gz
md5sum: 193809c784a4ea132ab2a253fa4f55b6
- path: output/sratools/SRR11140744_2.fastq.gz
md5sum: 3e3b3af3413f50a1685fd7b3f1456d4e