Merge branch 'nf-core:master' into master

commit f8b56d1248
FriederikeHanssen, 2022-04-07 12:22:40 +02:00, committed by GitHub
GPG key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
72 changed files with 1454 additions and 184 deletions

View file

@@ -12,14 +12,13 @@ process ADAPTERREMOVAL {
     path(adapterlist)

 output:
-    tuple val(meta), path("${prefix}.truncated.gz")           , optional: true, emit: singles_truncated
-    tuple val(meta), path("${prefix}.discarded.gz")           , optional: true, emit: discarded
-    tuple val(meta), path("${prefix}.pair1.truncated.gz")     , optional: true, emit: pair1_truncated
-    tuple val(meta), path("${prefix}.pair2.truncated.gz")     , optional: true, emit: pair2_truncated
-    tuple val(meta), path("${prefix}.collapsed.gz")           , optional: true, emit: collapsed
-    tuple val(meta), path("${prefix}.collapsed.truncated.gz") , optional: true, emit: collapsed_truncated
-    tuple val(meta), path("${prefix}.paired.gz")              , optional: true, emit: paired_interleaved
-    tuple val(meta), path('*.log')                            , emit: log
+    tuple val(meta), path("${prefix}.truncated.fastq.gz")           , optional: true, emit: singles_truncated
+    tuple val(meta), path("${prefix}.discarded.fastq.gz")           , optional: true, emit: discarded
+    tuple val(meta), path("${prefix}.pair{1,2}.truncated.fastq.gz") , optional: true, emit: paired_truncated
+    tuple val(meta), path("${prefix}.collapsed.fastq.gz")           , optional: true, emit: collapsed
+    tuple val(meta), path("${prefix}.collapsed.truncated.fastq.gz") , optional: true, emit: collapsed_truncated
+    tuple val(meta), path("${prefix}.paired.fastq.gz")              , optional: true, emit: paired_interleaved
+    tuple val(meta), path('*.settings')                             , emit: settings
     path "versions.yml"                                             , emit: versions

 when:
@@ -38,10 +37,19 @@ process ADAPTERREMOVAL {
         $adapterlist \\
         --basename ${prefix} \\
         --threads ${task.cpus} \\
-        --settings ${prefix}.log \\
         --seed 42 \\
         --gzip

+    ensure_fastq() {
+        if [ -f "\${1}" ]; then
+            mv "\${1}" "\${1::-3}.fastq.gz"
+        fi
+    }
+
+    ensure_fastq '${prefix}.truncated.gz'
+    ensure_fastq '${prefix}.discarded.gz'
+
     cat <<-END_VERSIONS > versions.yml
     "${task.process}":
         adapterremoval: \$(AdapterRemoval --version 2>&1 | sed -e "s/AdapterRemoval ver. //g")
@@ -56,10 +64,24 @@ process ADAPTERREMOVAL {
         $adapterlist \\
         --basename ${prefix} \\
         --threads $task.cpus \\
-        --settings ${prefix}.log \\
         --seed 42 \\
         --gzip

+    ensure_fastq() {
+        if [ -f "\${1}" ]; then
+            mv "\${1}" "\${1::-3}.fastq.gz"
+        fi
+    }
+
+    ensure_fastq '${prefix}.truncated.gz'
+    ensure_fastq '${prefix}.discarded.gz'
+    ensure_fastq '${prefix}.pair1.truncated.gz'
+    ensure_fastq '${prefix}.pair2.truncated.gz'
+    ensure_fastq '${prefix}.collapsed.gz'
+    ensure_fastq '${prefix}.collapsed.truncated.gz'
+    ensure_fastq '${prefix}.paired.gz'
+
     cat <<-END_VERSIONS > versions.yml
     "${task.process}":
         adapterremoval: \$(AdapterRemoval --version 2>&1 | sed -e "s/AdapterRemoval ver. //g")
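
The bash expansion "${1::-3}" in the new helper strips the trailing three characters (".gz") before ".fastq.gz" is appended. A standalone sketch, with hypothetical file names:

    # hypothetical demo of the rename helper added above
    ensure_fastq() {
        if [ -f "${1}" ]; then
            mv "${1}" "${1::-3}.fastq.gz"   # sample.truncated.gz -> sample.truncated.fastq.gz
        fi
    }
    touch sample.truncated.gz
    ensure_fastq 'sample.truncated.gz'
    ls sample.truncated.fastq.gz            # renamed; non-existent inputs are skipped silently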

View file

@@ -43,43 +43,43 @@ output:
       Adapter trimmed FastQ files of either single-end reads, or singleton
       'orphaned' reads from merging of paired-end data (i.e., one of the pair
       was lost due to filtering thresholds).
-    pattern: "*.truncated.gz"
+    pattern: "*.truncated.fastq.gz"
 - discarded:
     type: file
     description: |
       Adapter trimmed FastQ files of reads that did not pass filtering
       thresholds.
-    pattern: "*.discarded.gz"
+    pattern: "*.discarded.fastq.gz"
 - pair1_truncated:
     type: file
     description: |
       Adapter trimmed R1 FastQ files of paired-end reads that did not merge
       with their respective R2 pair due to long templates. The respective pair
       is stored in 'pair2_truncated'.
-    pattern: "*.pair1.truncated.gz"
+    pattern: "*.pair1.truncated.fastq.gz"
 - pair2_truncated:
     type: file
     description: |
       Adapter trimmed R2 FastQ files of paired-end reads that did not merge
       with their respective R1 pair due to long templates. The respective pair
       is stored in 'pair1_truncated'.
-    pattern: "*.pair2.truncated.gz"
+    pattern: "*.pair2.truncated.fastq.gz"
 - collapsed:
     type: file
     description: |
       Collapsed FastQ of paired-end reads that successfully merged with their
       respective R1 pair but were not trimmed.
-    pattern: "*.collapsed.gz"
+    pattern: "*.collapsed.fastq.gz"
 - collapsed_truncated:
     type: file
     description: |
       Collapsed FastQ of paired-end reads that successfully merged with their
       respective R1 pair and were trimmed of adapter due to sufficient overlap.
-    pattern: "*.collapsed.truncated.gz"
+    pattern: "*.collapsed.truncated.fastq.gz"
 - log:
     type: file
     description: AdapterRemoval log file
-    pattern: "*.log"
+    pattern: "*.settings"
 - versions:
     type: file
     description: File containing software versions

View file

@@ -2,10 +2,8 @@ process BIOBAMBAM_BAMMARKDUPLICATES2 {
     tag "$meta.id"
     label 'process_medium'

-    conda (params.enable_conda ? "bioconda::biobambam=2.0.182" : null)
-    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/biobambam:2.0.182--h7d875b9_0':
-        'quay.io/biocontainers/biobambam:2.0.182--h7d875b9_0' }"
+    conda (params.enable_conda ? "bioconda::biobambam=2.0.183" : null)
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 'https://depot.galaxyproject.org/singularity/biobambam:2.0.183--h9f5acd7_1' : 'quay.io/biocontainers/biobambam:2.0.183--h9f5acd7_1'}"

 input:
     tuple val(meta), path(bam)

View file

@@ -0,0 +1,46 @@
process BIOBAMBAM_BAMSORMADUP {
tag "$meta.id"
label "process_medium"
conda (params.enable_conda ? "bioconda::biobambam=2.0.183" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 'https://depot.galaxyproject.org/singularity/biobambam:2.0.183--h9f5acd7_1' : 'quay.io/biocontainers/biobambam:2.0.183--h9f5acd7_1'}"
input:
tuple val(meta), path(bams)
path(fasta)
output:
tuple val(meta), path("*.{bam,cram}") ,emit: bam
tuple val(meta), path("*.bam.bai") ,optional:true, emit: bam_index
tuple val(meta), path("*.metrics.txt") ,emit: metrics
path "versions.yml" ,emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def suffix = args.contains("outputformat=cram") ? "cram" : "bam"
def input_string = bams.join(" I=")
if (args.contains("outputformat=cram") && !fasta) error "Reference required for CRAM output."
"""
bamcat \\
I=${input_string} \\
level=0 \\
| bamsormadup \\
$args \\
M=${prefix}.metrics.txt \\
tmpfile=$prefix \\
threads=$task.cpus \\
> ${prefix}.${suffix}
cat <<-END_VERSIONS > versions.yml
"${task.process}":
bamcat: \$(echo \$(bamsormadup --version 2>&1) | sed 's/^This is biobambam2 version //; s/..biobambam2 is .*\$//' )
bamsormadup: \$(echo \$(bamsormadup --version 2>&1) | sed 's/^This is biobambam2 version //; s/..biobambam2 is .*\$//' )
END_VERSIONS
"""
}
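
The script switches to a .cram output suffix whenever task.ext.args contains outputformat=cram. A minimal, hypothetical nextflow.config sketch that would trigger that branch:

    process {
        withName: 'BIOBAMBAM_BAMSORMADUP' {
            // picked up as $args; the script then writes ${prefix}.cram
            // and requires the fasta input to be set
            ext.args = 'outputformat=cram'
        }
    }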

View file

@ -0,0 +1,52 @@
name: biobambam_bamsormadup
description: Parallel sorting and duplicate marking
keywords:
- markduplicates
- sort
- bam
- cram
tools:
- biobambam:
description: |
biobambam is a set of tools for early stage alignment file processing.
homepage: https://gitlab.com/german.tischler/biobambam2
documentation: https://gitlab.com/german.tischler/biobambam2/-/blob/master/README.md
doi: 10.1186/1751-0473-9-13
licence: ["GPL v3"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- bams:
type: file
description: List containing 1 or more bam files
- fasta:
type: file
description: Reference genome in FASTA format (optional)
pattern: "*.{fa,fasta}"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- bam:
type: file
description: BAM/CRAM file with duplicate reads marked/removed
pattern: "*.{bam,cram}"
- bam_index:
type: file
description: BAM index file
pattern: "*.{bai}"
- metrics:
type: file
description: Duplicate metrics file generated by biobambam
pattern: "*.{metrics.txt}"
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
authors:
- "@matthdsm"

View file

@@ -49,6 +49,8 @@ process CAT_CAT {
     """

 stub:
+    def file_list = files_in.collect { it.toString() }
+    prefix = task.ext.prefix ?: "${meta.id}${file_list[0].substring(file_list[0].lastIndexOf('.'))}"
     """
     touch $prefix
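
The new default stub prefix reuses the extension of the first input file. A small Groovy sketch with hypothetical file names:

    def file_list = ['sample1.fasta', 'sample2.fasta']
    def prefix = 'test' + file_list[0].substring(file_list[0].lastIndexOf('.'))
    assert prefix == 'test.fasta'   // the stub touches a file with the right extension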

View file

@@ -1,4 +1,4 @@
-process CENTRIFUGE {
+process CENTRIFUGE_CENTRIFUGE {
     tag "$meta.id"
     label 'process_high'

@@ -17,7 +17,6 @@ process CENTRIFUGE {
 output:
     tuple val(meta), path('*report.txt')                 , emit: report
     tuple val(meta), path('*results.txt')                , emit: results
-    tuple val(meta), path('*kreport.txt')                , emit: kreport
     tuple val(meta), path('*.sam')                       , optional: true, emit: sam
     tuple val(meta), path('*.mapped.fastq{,.1,.2}.gz')   , optional: true, emit: fastq_mapped
     tuple val(meta), path('*.unmapped.fastq{,.1,.2}.gz') , optional: true, emit: fastq_unmapped

@@ -30,7 +29,6 @@ process CENTRIFUGE {
     def args = task.ext.args ?: ''
     def prefix = task.ext.prefix ?: "${meta.id}"
     def paired = meta.single_end ? "-U ${reads}" : "-1 ${reads[0]} -2 ${reads[1]}"
-    def db_name = db.toString().replace(".tar.gz","")
     def unaligned = ''
     def aligned = ''
     if (meta.single_end) {

@@ -42,9 +40,10 @@ process CENTRIFUGE {
     }
     def sam_output = sam_format ? "--out-fmt 'sam'" : ''
     """
-    tar -xf $db
+    ## the -not -name "._*" filter ensures Mac OSX metadata files aren't picked up
+    db_name=`find -L ${db} -name "*.1.cf" -not -name "._*" | sed 's/.1.cf//'`
     centrifuge \\
-        -x $db_name \\
+        -x \$db_name \\
         -p $task.cpus \\
         $paired \\
         --report-file ${prefix}.report.txt \\

@@ -53,7 +52,6 @@ process CENTRIFUGE {
         $aligned \\
         $sam_output \\
         $args
-    centrifuge-kreport -x $db_name ${prefix}.results.txt > ${prefix}.kreport.txt

     cat <<-END_VERSIONS > versions.yml
     "${task.process}":

View file

@ -1,4 +1,4 @@
name: centrifuge name: centrifuge_centrifuge
description: Classifies metagenomic sequence data description: Classifies metagenomic sequence data
keywords: keywords:
- classify - classify
@ -25,8 +25,7 @@ input:
respectively. respectively.
- db: - db:
type: directory type: directory
description: Centrifuge database in .tar.gz format description: Path to directory containing centrifuge database files
pattern: "*.tar.gz"
- save_unaligned: - save_unaligned:
type: value type: value
description: If true unmapped fastq files are saved description: If true unmapped fastq files are saved
@ -49,12 +48,6 @@ output:
description: | description: |
File containing classification results File containing classification results
pattern: "*.{results.txt}" pattern: "*.{results.txt}"
- kreport:
type: file
description: |
File containing kraken-style report from centrifuge
out files.
pattern: "*.{kreport.txt}"
- fastq_unmapped: - fastq_unmapped:
type: file type: file
description: Unmapped fastq files description: Unmapped fastq files

View file

@@ -4,7 +4,7 @@ process CNVPYTOR_CALLCNVS {
     conda (params.enable_conda ? "bioconda::cnvpytor=1.0" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/cnvpytor:A1.0--py39h6a678da_2':
+        'https://depot.galaxyproject.org/singularity/cnvpytor:1.0--py39h6a678da_2':
         'quay.io/biocontainers/cnvpytor:1.0--py39h6a678da_2' }"

 input:

@@ -30,4 +30,15 @@ process CNVPYTOR_CALLCNVS {
         cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
     END_VERSIONS
     """
+
+    stub:
+    def prefix = task.ext.prefix ?: "${meta.id}"
+    """
+    touch ${prefix}.tsv
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
+    END_VERSIONS
+    """
 }

View file

@@ -4,7 +4,7 @@ process CNVPYTOR_HISTOGRAM {
     conda (params.enable_conda ? "bioconda::cnvpytor=1.0" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/cnvpytor:A1.0--py39h6a678da_2':
+        'https://depot.galaxyproject.org/singularity/cnvpytor:1.0--py39h6a678da_2':
         'quay.io/biocontainers/cnvpytor:1.0--py39h6a678da_2' }"

 input:

@@ -29,4 +29,14 @@ process CNVPYTOR_HISTOGRAM {
         cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
     END_VERSIONS
     """
+
+    stub:
+    """
+    touch ${pytor.baseName}.pytor
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
+    END_VERSIONS
+    """
 }

View file

@@ -4,7 +4,7 @@ process CNVPYTOR_IMPORTREADDEPTH {
     conda (params.enable_conda ? "bioconda::cnvpytor=1.0" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/cnvpytor:A1.0--py39h6a678da_2':
+        'https://depot.galaxyproject.org/singularity/cnvpytor:1.0--py39h6a678da_2':
         'quay.io/biocontainers/cnvpytor:1.0--py39h6a678da_2' }"

 input:

@@ -35,4 +35,15 @@ process CNVPYTOR_IMPORTREADDEPTH {
         cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
     END_VERSIONS
     """
+
+    stub:
+    def prefix = task.ext.prefix ?: "${meta.id}"
+    """
+    touch ${prefix}.pytor
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
+    END_VERSIONS
+    """
 }

View file

@@ -4,7 +4,7 @@ process CNVPYTOR_PARTITION {
     conda (params.enable_conda ? "bioconda::cnvpytor=1.0" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/cnvpytor:A1.0--py39h6a678da_2':
+        'https://depot.galaxyproject.org/singularity/cnvpytor:1.0--py39h6a678da_2':
         'quay.io/biocontainers/cnvpytor:1.0--py39h6a678da_2' }"

 input:

@@ -18,7 +18,7 @@ process CNVPYTOR_PARTITION {
     task.ext.when == null || task.ext.when

 script:
-    def args = task.ext.args ?: '1000'
+    def args = task.ext.args ?: ''
     """
     cnvpytor \\
         -root $pytor \\

@@ -29,4 +29,14 @@ process CNVPYTOR_PARTITION {
         cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
     END_VERSIONS
     """
+
+    stub:
+    """
+    touch ${pytor.baseName}.pytor
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
+    END_VERSIONS
+    """
 }

View file

@@ -2,27 +2,28 @@ process DASTOOL_DASTOOL {
     tag "$meta.id"
     label 'process_medium'

-    conda (params.enable_conda ? "bioconda::das_tool=1.1.3" : null)
+    conda (params.enable_conda ? "bioconda::das_tool=1.1.4" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/das_tool:1.1.3--r41hdfd78af_0' :
-        'quay.io/biocontainers/das_tool:1.1.3--r41hdfd78af_0' }"
+        'https://depot.galaxyproject.org/singularity/das_tool:1.1.4--r41hdfd78af_1' :
+        'quay.io/biocontainers/das_tool:1.1.4--r41hdfd78af_1' }"

 input:
     tuple val(meta), path(contigs), path(bins)
     path(proteins)
     path(db_directory)
-    val(search_engine)

 output:
     tuple val(meta), path("*.log")                       , emit: log
-    tuple val(meta), path("*_summary.txt")               , emit: summary
-    tuple val(meta), path("*_DASTool_scaffolds2bin.txt") , emit: scaffolds2bin
+    tuple val(meta), path("*_summary.tsv")               , optional: true, emit: summary
+    tuple val(meta), path("*_DASTool_contig2bin.tsv")    , optional: true, emit: contig2bin
     tuple val(meta), path("*.eval")                      , optional: true, emit: eval
     tuple val(meta), path("*_DASTool_bins/*.fa")         , optional: true, emit: bins
     tuple val(meta), path("*.pdf")                       , optional: true, emit: pdfs
-    tuple val(meta), path("*.proteins.faa")              , optional: true, emit: fasta_proteins
-    tuple val(meta), path("*.faa")                       , optional: true, emit: candidates_faa
+    tuple val(meta), path("*.candidates.faa")            , optional: true, emit: fasta_proteins
     tuple val(meta), path("*.archaea.scg")               , optional: true, emit: fasta_archaea_scg
     tuple val(meta), path("*.bacteria.scg")              , optional: true, emit: fasta_bacteria_scg
+    tuple val(meta), path("*.b6")                        , optional: true, emit: b6
     tuple val(meta), path("*.seqlength")                 , optional: true, emit: seqlength
     path "versions.yml"                                  , emit: versions

@@ -33,17 +34,12 @@ process DASTOOL_DASTOOL {
     def args = task.ext.args ?: ''
     def prefix = task.ext.prefix ?: "${meta.id}"
     def bin_list = bins instanceof List ? bins.join(",") : "$bins"
-    def engine = search_engine ? "--search_engine $search_engine" : "--search_engine diamond"
     def db_dir = db_directory ? "--db_directory $db_directory" : ""
     def clean_contigs = contigs.toString() - ".gz"
     def decompress_contigs = contigs.toString() == clean_contigs ? "" : "gunzip -q -f $contigs"
-    def decompress_proteins = proteins ? "gunzip -f $proteins" : ""
     def clean_proteins = proteins ? proteins.toString() - ".gz" : ""
-    def proteins_pred = proteins ? "--proteins $clean_proteins" : ""
-    if (! search_engine) {
-        log.info('[DAS_Tool] Default search engine (USEARCH) is proprietary software and not available in bioconda. Using DIAMOND as alternative.')
-    }
+    def decompress_proteins = proteins ? "gunzip -f $proteins" : ""
+    def proteins_pred = proteins ? "-p $clean_proteins" : ""
     """
     $decompress_proteins

@@ -53,15 +49,14 @@ process DASTOOL_DASTOOL {
         $args \\
         $proteins_pred \\
         $db_dir \\
-        $engine \\
         -t $task.cpus \\
-        --bins $bin_list \\
+        -i $bin_list \\
         -c $clean_contigs \\
         -o $prefix

     cat <<-END_VERSIONS > versions.yml
     "${task.process}":
-        dastool: \$( DAS_Tool --version 2>&1 | grep "DAS Tool" | sed 's/DAS Tool version //' )
+        dastool: \$( DAS_Tool --version 2>&1 | grep "DAS Tool" | sed 's/DAS Tool //' )
     END_VERSIONS
     """
 }

View file

@@ -34,8 +34,8 @@ input:
     pattern: "*.{fa.gz,fas.gz,fasta.gz}"
 - bins:
     type: file
-    description: "Scaffolds2bin tabular file generated with dastool/scaffolds2bin"
-    pattern: "*.scaffolds2bin.tsv"
+    description: "FastaToContig2Bin tabular file generated with dastool/fastatocontig2bin"
+    pattern: "*.tsv"
 - proteins:
     type: file
     description: Predicted proteins in prodigal fasta format (>scaffoldID_geneNo)

@@ -43,9 +43,6 @@ input:
 - db_directory:
     type: file
     description: (optional) Directory of single copy gene database.
-- search_engine:
-    type: val
-    description: Engine used for single copy gene identification. USEARCH is not supported due to it being proprietary [blast/diamond]

 output:
 - meta:

@@ -65,14 +62,17 @@ output:
     type: file
     description: Summary of output bins including quality and completeness estimates
     pattern: "*summary.txt"
-- scaffolds2bin:
+- contig2bin:
     type: file
     description: Scaffolds to bin file of output bins
-    pattern: "*.scaffolds2bin.txt"
+    pattern: "*.contig2bin.txt"
 - eval:
     type: file
     description: Quality and completeness estimates of input bin sets
     pattern: "*.eval"
+- bins:
+    description: Final refined bins in fasta format
+    pattern: "*.fa"
 - pdfs:
     type: file
     description: Plots showing the amount of high quality bins and score distribution of bins per method

@@ -89,6 +89,10 @@ output:
     type: file
     description: Results of bacterial single-copy-gene prediction
     pattern: "*.bacteria.scg"
+- b6:
+    type: file
+    description: Results in b6 format
+    pattern: "*.b6"
 - seqlength:
     type: file
     description: Summary of contig lengths

View file

@@ -0,0 +1,41 @@
process DASTOOL_FASTATOCONTIG2BIN {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::das_tool=1.1.4" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/das_tool:1.1.4--r41hdfd78af_1' :
'quay.io/biocontainers/das_tool:1.1.4--r41hdfd78af_1' }"
input:
tuple val(meta), path(fasta)
val(extension)
output:
tuple val(meta), path("*.tsv"), emit: fastatocontig2bin
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def file_extension = extension ? extension : "fasta"
def clean_fasta = fasta.toString() - ".gz"
def decompress_fasta = fasta.toString() == clean_fasta ? "" : "gunzip -q -f $fasta"
"""
$decompress_fasta
Fasta_to_Contig2Bin.sh \\
$args \\
-i . \\
-e $file_extension \\
> ${prefix}.tsv
cat <<-END_VERSIONS > versions.yml
"${task.process}":
dastool: \$( DAS_Tool --version 2>&1 | grep "DAS Tool" | sed 's/DAS Tool //' )
END_VERSIONS
"""
}
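
A sketch of how a workflow could chain this helper into DASTOOL_DASTOOL above; the include paths, channel shape and the 'fa' extension are illustrative assumptions, not part of this commit:

    include { DASTOOL_FASTATOCONTIG2BIN } from '../../../modules/dastool/fastatocontig2bin/main'
    include { DASTOOL_DASTOOL           } from '../../../modules/dastool/dastool/main'

    workflow BIN_REFINEMENT {
        take:
        ch_contigs_bins   // channel: [ val(meta), path(contigs), path(bins) ]

        main:
        // tabulate each sample's bin fastas into contig2bin format
        DASTOOL_FASTATOCONTIG2BIN (
            ch_contigs_bins.map { meta, contigs, bins -> [ meta, bins ] },
            'fa'
        )

        // rejoin the table with the contigs and run DAS Tool (no proteins, no db dir)
        DASTOOL_DASTOOL (
            ch_contigs_bins
                .map { meta, contigs, bins -> [ meta, contigs ] }
                .join( DASTOOL_FASTATOCONTIG2BIN.out.fastatocontig2bin ),
            [],
            []
        )
    }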

View file

@@ -0,0 +1,56 @@
name: dastool_fastatocontig2bin
description: Helper script to convert a set of bins in fasta format to tabular scaffolds2bin format
keywords:
- binning
- das tool
- table
- de novo
- bins
- contigs
- assembly
- das_tool
tools:
- dastool:
description: |
DAS Tool is an automated method that integrates the results
of a flexible number of binning algorithms to calculate an optimized, non-redundant
set of bins from a single assembly.
homepage: https://github.com/cmks/DAS_Tool
documentation: https://github.com/cmks/DAS_Tool
tool_dev_url: https://github.com/cmks/DAS_Tool
doi: "10.1038/s41564-018-0171-1"
licence: ["BSD"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- fasta:
type: file
description: Fasta or list of fasta files of bins, recommended to be gathered with .collect()
pattern: "*.{fa,fa.gz,fas,fas.gz,fna,fna.gz,fasta,fasta.gz}"
- extension:
type: val
description: Fasta file extension (fa | fas | fasta | ...); omit the .gz suffix for gzipped input.
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- fastatocontig2bin:
type: file
description: tabular contig2bin file for DAS tool input
pattern: "*.tsv"
authors:
- "@maxibor"
- "@jfy133"

View file

@@ -46,7 +46,7 @@ process DEEPVARIANT {
     """

 stub:
-    def prefix = task.ext.prefix ?: "${meta.id}"
+    prefix = task.ext.prefix ?: "${meta.id}"
     """
     touch ${prefix}.vcf.gz
     touch ${prefix}.g.vcf.gz

View file

@@ -39,9 +39,8 @@ process GATK4_CREATESEQUENCEDICTIONARY {
     """

 stub:
-    def prefix = task.ext.prefix ?: "${meta.id}"
     """
-    touch ${prefix}.dict
+    touch test.dict

     cat <<-END_VERSIONS > versions.yml
     "${task.process}":

View file

@@ -0,0 +1,40 @@
process GSTAMA_POLYACLEANUP {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::gs-tama=1.0.3" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/gs-tama:1.0.3--hdfd78af_0':
'quay.io/biocontainers/gs-tama:1.0.3--hdfd78af_0' }"
input:
tuple val(meta), path(fasta)
output:
tuple val(meta), path("*_tama.fa.gz") , emit: fasta
tuple val(meta), path("*_tama_polya_flnc_report.txt.gz"), emit: report
tuple val(meta), path("*_tama_tails.fa.gz") , emit: tails
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
if( "$fasta" == "${prefix}.fasta" | "$fasta" == "${prefix}.fa" ) error "Input and output names are the same, set prefix in module configuration"
"""
tama_flnc_polya_cleanup.py \\
-f $fasta \\
-p ${prefix} \\
$args
gzip ${prefix}.fa
gzip ${prefix}_polya_flnc_report.txt
gzip ${prefix}_tails.fa
cat <<-END_VERSIONS > versions.yml
"${task.process}":
gstama: \$( tama_collapse.py -version | grep 'tc_version_date_'|sed 's/tc_version_date_//g' )
END_VERSIONS
"""
}

View file

@@ -0,0 +1,55 @@
name: gstama_polyacleanup
description: Helper script, remove remaining polyA sequences from Full Length Non Chimeric reads (Pacbio isoseq3)
keywords:
- gstama
- gstama/polyacleanup
- long-read
- isoseq
- tama
- transcriptome
- annotation
tools:
- gstama:
description: Gene-Switch Transcriptome Annotation by Modular Algorithms
homepage: https://github.com/sguizard/gs-tama
documentation: https://github.com/GenomeRIK/tama/wiki
tool_dev_url: https://github.com/sguizard/gs-tama
doi: "https://doi.org/10.1186/s12864-020-07123-7"
licence: ["GPL v3 License"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- fasta:
type: file
description: Full Length Non Chimeric reads in fasta format
pattern: "*.{fa,fasta}"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- fasta:
type: file
description: The Full Length Non Chimeric reads cleaned of remaining polyA tails. The sequences are in FASTA format compressed with gzip.
pattern: "*_tama.fa.gz"
- report:
type: file
description: A text file describing the number of polyA tails removed and their length. Compressed with gzip.
pattern: "*_tama_polya_flnc_report.txt.gz"
- tails:
type: file
description: A gzip compressed FASTA file of trimmed polyA tails.
pattern: "*_tama_tails.fa.gz"
authors:
- "@sguizard"

View file

@@ -8,9 +8,10 @@ process MANTA_GERMLINE {
         'quay.io/biocontainers/manta:1.6.0--h9ee0642_1' }"

 input:
-    tuple val(meta), path(input), path(index), path(target_bed), path(target_bed_tbi)
+    tuple val(meta), path(input), path(index)
     path fasta
     path fasta_fai
+    tuple path(target_bed), path(target_bed_tbi)

 output:

View file

@@ -26,7 +26,7 @@ process PHANTOMPEAKQUALTOOLS {
     def prefix = task.ext.prefix ?: "${meta.id}"
     """
     RUN_SPP=`which run_spp.R`
-    Rscript -e "library(caTools); source(\\"\$RUN_SPP\\")" -c="$bam" -savp="${prefix}.spp.pdf" -savd="${prefix}.spp.Rdata" -out="${prefix}.spp.out" -p=$task.cpus
+    Rscript $args -e "library(caTools); source(\\"\$RUN_SPP\\")" -c="$bam" -savp="${prefix}.spp.pdf" -savd="${prefix}.spp.Rdata" -out="${prefix}.spp.out" -p=$task.cpus

     cat <<-END_VERSIONS > versions.yml
     "${task.process}":

View file

@@ -0,0 +1,60 @@
name: "phantompeakqualtools"
description: Computes informative enrichment and quality measures for ChIP-seq/DNase-seq/FAIRE-seq/MNase-seq data
keywords:
- "ChIP-Seq"
- "QC"
- "phantom peaks"
tools:
- "phantompeakqualtools":
description: |
"This package computes informative enrichment and quality measures
for ChIP-seq/DNase-seq/FAIRE-seq/MNase-seq data. It can also be used
to obtain robust estimates of the predominant fragment length or
characteristic tag shift values in these assays."
homepage: "None"
documentation: "https://github.com/kundajelab/phantompeakqualtools"
tool_dev_url: "https://github.com/kundajelab/phantompeakqualtools"
doi: "https://doi.org/10.1101/gr.136184.111"
licence: "['BSD-3-clause']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- bam:
type: file
description: BAM/CRAM/SAM file
pattern: "*.{bam,cram,sam}"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- spp:
type: file
description: |
A ChIP-Seq Processing Pipeline file containing
peakshift/phantomPeak results
pattern: "*.{out}"
- pdf:
type: file
description: A pdf containing saved cross-correlation plots
pattern: "*.{pdf}"
- rdata:
type: file
description: Rdata file containing the R session
pattern: "*.{Rdata}"
authors:
- "@drpatelh"
- "@Emiller88"
- "@JoseEspinosa"

View file

@@ -0,0 +1,61 @@
process PRINSEQPLUSPLUS {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::prinseq-plus-plus=1.2.3" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/prinseq-plus-plus:1.2.3--hc90279e_1':
'quay.io/biocontainers/prinseq-plus-plus:1.2.3--hc90279e_1' }"
input:
tuple val(meta), path(reads)
output:
tuple val(meta), path("*_good_out*.fastq.gz") , emit: good_reads
tuple val(meta), path("*_single_out*.fastq.gz"), optional: true, emit: single_reads
tuple val(meta), path("*_bad_out*.fastq.gz") , optional: true, emit: bad_reads
tuple val(meta), path("*.log") , emit: log
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
if (meta.single_end) {
"""
prinseq++ \\
-threads $task.cpus \\
-fastq ${reads} \\
-out_name ${prefix} \\
-out_gz \\
-VERBOSE 1 \\
$args \\
| tee ${prefix}.log
cat <<-END_VERSIONS > versions.yml
"${task.process}":
prinseqplusplus: \$(echo \$(prinseq++ --version | cut -f 2 -d ' ' ))
END_VERSIONS
"""
} else {
"""
prinseq++ \\
-threads $task.cpus \\
-fastq ${reads[0]} \\
-fastq2 ${reads[1]} \\
-out_name ${prefix} \\
-out_gz \\
-VERBOSE 1 \\
$args \\
| tee ${prefix}.log
cat <<-END_VERSIONS > versions.yml
"${task.process}":
prinseqplusplus: \$(echo \$(prinseq++ --version | cut -f 2 -d ' ' ))
END_VERSIONS
"""
}
}

View file

@@ -0,0 +1,60 @@
name: "prinseqplusplus"
description: PRINSEQ++ is a C++ implementation of the prinseq-lite.pl program. It can be used to filter, reformat or trim genomic and metagenomic sequence data
keywords:
- fastq
- fasta
- filter
- trim
tools:
- "prinseqplusplus":
description: "PRINSEQ++ - Multi-threaded C++ sequence cleaning"
homepage: "https://github.com/Adrian-Cantu/PRINSEQ-plus-plus"
documentation: "https://github.com/Adrian-Cantu/PRINSEQ-plus-plus"
tool_dev_url: "https://github.com/Adrian-Cantu/PRINSEQ-plus-plus"
doi: "10.7287/peerj.preprints.27553v1"
licence: "['GPL v2']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- reads:
type: file
description: |
List of input FastQ files of size 1 and 2 for single-end and paired-end
data, respectively.
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- good_reads:
type: file
description: Reads passing filter(s) in gzipped FASTQ format
pattern: "*_good_out_{R1,R2}.fastq.gz"
- single_reads:
type: file
description: |
Single reads without the pair passing filter(s) in gzipped FASTQ format
pattern: "*_single_out_{R1,R2}.fastq.gz"
- bad_reads:
type: file
description: |
Reads not passing filter(s) in gzipped FASTQ format
pattern: "*_bad_out_{R1,R2}.fastq.gz"
- log:
type: file
description: |
Verbose level 2 STDOUT information in a log file
pattern: "*.log"
authors:
- "@jfy133"

View file

@@ -2,10 +2,10 @@ process PYDAMAGE_ANALYZE {
     tag "$meta.id"
     label 'process_medium'

-    conda (params.enable_conda ? "bioconda::pydamage=0.62" : null)
+    conda (params.enable_conda ? "bioconda::pydamage=0.70" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/pydamage:0.62--pyhdfd78af_0' :
-        'quay.io/biocontainers/pydamage:0.62--pyhdfd78af_0' }"
+        'https://depot.galaxyproject.org/singularity/pydamage:0.70--pyhdfd78af_0' :
+        'quay.io/biocontainers/pydamage:0.70--pyhdfd78af_0' }"

 input:
     tuple val(meta), path(bam), path(bai)

View file

@@ -2,10 +2,10 @@ process PYDAMAGE_FILTER {
     tag "$meta.id"
     label 'process_low'

-    conda (params.enable_conda ? "bioconda::pydamage=0.62" : null)
+    conda (params.enable_conda ? "bioconda::pydamage=0.70" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/pydamage:0.62--pyhdfd78af_0' :
-        'quay.io/biocontainers/pydamage:0.62--pyhdfd78af_0' }"
+        'https://depot.galaxyproject.org/singularity/pydamage:0.70--pyhdfd78af_0' :
+        'quay.io/biocontainers/pydamage:0.70--pyhdfd78af_0' }"

 input:
     tuple val(meta), path(csv)

View file

@@ -0,0 +1,34 @@
process SEQKIT_STATS {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::seqkit=2.2.0" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/seqkit:2.2.0--h9ee0642_0':
'quay.io/biocontainers/seqkit:2.2.0--h9ee0642_0' }"
input:
tuple val(meta), path(reads)
output:
tuple val(meta), path("*.tsv"), emit: stats
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: '--all'
def prefix = task.ext.prefix ?: "${meta.id}"
"""
seqkit stats \\
--tabular \\
$args \\
$reads > '${prefix}.tsv'
cat <<-END_VERSIONS > versions.yml
"${task.process}":
seqkit: \$( seqkit version | sed 's/seqkit v//' )
END_VERSIONS
"""
}

View file

@@ -0,0 +1,44 @@
name: "seqkit_stats"
description: simple statistics of FASTA/Q files
keywords:
- seqkit
- stats
tools:
- "seqkit":
description: Cross-platform and ultrafast toolkit for FASTA/Q file manipulation, written by Wei Shen.
homepage: https://bioinf.shenwei.me/seqkit/usage/
documentation: https://bioinf.shenwei.me/seqkit/usage/
tool_dev_url: https://github.com/shenwei356/seqkit/
doi: "10.1371/journal.pone.0163962"
licence: ["MIT"]
input:
- meta:
type: map
description: >
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- reads:
type: file
description: >
Either FASTA or FASTQ files.
pattern: "*.{fa,fna,faa,fasta,fq,fastq}[.gz]"
output:
- meta:
type: map
description: >
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- stats:
type: file
description: >
Tab-separated output file with basic sequence statistics.
pattern: "*.tsv"
authors:
- "@Midnighter"

View file

@@ -2,10 +2,10 @@ process SVDB_MERGE {
     tag "$meta.id"
     label 'process_medium'

-    conda (params.enable_conda ? "bioconda::svdb=2.5.2" : null)
+    conda (params.enable_conda ? "bioconda::svdb=2.6.0" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/svdb:2.5.2--py39h5371cbf_0':
-        'quay.io/biocontainers/svdb:2.5.2--py39h5371cbf_0' }"
+        'https://depot.galaxyproject.org/singularity/svdb:2.6.0--py39h5371cbf_0':
+        'quay.io/biocontainers/svdb:2.6.0--py39h5371cbf_0' }"

 input:
     tuple val(meta), path(vcfs)

View file

@@ -2,10 +2,10 @@ process SVDB_QUERY {
     tag "$meta.id"
     label 'process_medium'

-    conda (params.enable_conda ? "bioconda::svdb=2.5.2" : null)
+    conda (params.enable_conda ? "bioconda::svdb=2.6.0" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/svdb:2.5.2--py39h5371cbf_0':
-        'quay.io/biocontainers/svdb:2.5.2--py39h5371cbf_0' }"
+        'https://depot.galaxyproject.org/singularity/svdb:2.6.0--py39h5371cbf_0':
+        'quay.io/biocontainers/svdb:2.6.0--py39h5371cbf_0' }"

 input:
     tuple val(meta), path(vcf)

View file

@@ -40,8 +40,8 @@ process TIDDIT_COV {
 stub:
     def prefix = task.ext.prefix ?: "${meta.id}"
     """
-    touch $prefix.wig
-    touch $prefix.tab
+    touch ${prefix}.wig
+    touch ${prefix}.tab

     cat <<-END_VERSIONS > versions.yml
     "${task.process}":

View file

@@ -42,9 +42,9 @@ process TIDDIT_SV {
 stub:
     def prefix = task.ext.prefix ?: "${meta.id}"
     """
-    touch $prefix.vcf
-    touch $prefix.ploidy.tab
-    touch $prefix.signals.tab
+    touch ${prefix}.vcf
+    touch ${prefix}.ploidy.tab
+    touch ${prefix}.signals.tab

     cat <<-END_VERSIONS > versions.yml
     "${task.process}":

View file

@@ -0,0 +1,50 @@
/*
* Identify transcripts with homer
*/
include { HOMER_MAKETAGDIRECTORY } from '../../../../modules/homer/maketagdirectory/main'
include { HOMER_MAKEUCSCFILE } from '../../../../modules/homer/makeucscfile/main'
include { HOMER_FINDPEAKS } from '../../../../modules/homer/findpeaks/main'
include { HOMER_POS2BED } from '../../../../modules/homer/pos2bed/main'
workflow HOMER_GROSEQ {
take:
bam // channel: [ val(meta), [ reads ] ]
fasta // file: /path/to/genome.fasta
main:
ch_versions = Channel.empty()
/*
* Create a Tag Directory From The GRO-Seq experiment
*/
HOMER_MAKETAGDIRECTORY ( bam, fasta )
ch_versions = ch_versions.mix(HOMER_MAKETAGDIRECTORY.out.versions.first())
/*
* Creating UCSC Visualization Files
*/
HOMER_MAKEUCSCFILE ( HOMER_MAKETAGDIRECTORY.out.tagdir )
ch_versions = ch_versions.mix(HOMER_MAKEUCSCFILE.out.versions.first())
/*
* Find transcripts directly from GRO-Seq
*/
HOMER_FINDPEAKS ( HOMER_MAKETAGDIRECTORY.out.tagdir )
ch_versions = ch_versions.mix(HOMER_FINDPEAKS.out.versions.first())
/*
* Convert peak file to bed file
*/
HOMER_POS2BED ( HOMER_FINDPEAKS.out.txt )
ch_versions = ch_versions.mix(HOMER_POS2BED.out.versions.first())
emit:
tagdir = HOMER_MAKETAGDIRECTORY.out.tagdir // channel: [ val(meta), [ tagdir ] ]
bed_graph = HOMER_MAKEUCSCFILE.out.bedGraph // channel: [ val(meta), [ tag_dir/*ucsc.bedGraph.gz ] ]
peaks = HOMER_FINDPEAKS.out.txt // channel: [ val(meta), [ *peaks.txt ] ]
bed = HOMER_POS2BED.out.bed // channel: [ val(meta), [ *.bed ] ]
versions = ch_versions // channel: [ versions.yml ]
}
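
A minimal usage sketch for this subworkflow; the include path and test-data keys are illustrative assumptions:

    include { HOMER_GROSEQ } from '../../../../subworkflows/nf-core/homer/groseq/main'

    workflow test_homer_groseq {
        // [ meta, bam ] pairs in; any aligned GRO-seq BAM works here
        def input = [ [ id:'test' ],
                      file(params.test_data['sarscov2']['illumina']['test_single_end_bam'], checkIfExists: true) ]
        def fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)

        HOMER_GROSEQ ( Channel.of(input), fasta )
    }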

View file

@@ -0,0 +1,48 @@
name: homer_groseq
description: Identify transcripts directly from GRO-seq data with homer, by creating a tag directory, generating UCSC visualization files, finding peaks and converting them to BED.
keywords:
- homer
- groseq
- nascent
modules:
- homer/maketagdirectory
- homer/makeucscfile
- homer/findpeaks
- homer/pos2bed
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test' ]
- input:
type: list
description: list of BAM files, also able to take SAM and BED as input
pattern: "[ *.{bam/sam/bed} ]"
- fasta:
type: file
description: The reference fasta file
pattern: "*.fasta"
output:
- tagdir:
type: directory
description: The "Tag Directory"
pattern: "*_tagdir"
- bedGraph:
type: file
description: The UCSC bed graph
pattern: "*.bedGraph.gz"
- peaks:
type: file
description: The found peaks
pattern: "*.peaks.txt"
- bed:
type: file
description: A BED file of the found peaks
pattern: "*.bed"
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
authors:
- "@Emiller88"

View file

@@ -214,6 +214,10 @@ biobambam/bammarkduplicates2:
   - modules/biobambam/bammarkduplicates2/**
   - tests/modules/biobambam/bammarkduplicates2/**

+biobambam/bamsormadup:
+  - modules/biobambam/bamsormadup/**
+  - tests/modules/biobambam/bamsormadup/**
+
 biscuit/align:
   - modules/biscuit/index/**
   - modules/biscuit/align/**

@@ -391,9 +395,9 @@ cellranger/mkref:
   - modules/cellranger/gtf/**
   - tests/modules/cellranger/gtf/**

-centrifuge:
-  - modules/centrifuge/**
-  - tests/modules/centrifuge/**
+centrifuge/centrifuge:
+  - modules/centrifuge/centrifuge/**
+  - tests/modules/centrifuge/centrifuge/**

 checkm/lineagewf:
   - modules/checkm/lineagewf/**

@@ -487,6 +491,10 @@ dastool/dastool:
   - modules/dastool/dastool/**
   - tests/modules/dastool/dastool/**

+dastool/fastatocontig2bin:
+  - modules/dastool/fastatocontig2bin/**
+  - tests/modules/dastool/fastatocontig2bin/**
+
 dastool/scaffolds2bin:
   - modules/dastool/scaffolds2bin/**
   - tests/modules/dastool/scaffolds2bin/**

@@ -811,6 +819,10 @@ gstama/merge:
   - modules/gstama/merge/**
   - tests/modules/gstama/merge/**

+gstama/polyacleanup:
+  - modules/gstama/polyacleanup/**
+  - tests/modules/gstama/polyacleanup/**
+
 gtdbtk/classifywf:
   - modules/gtdbtk/classifywf/**
   - tests/modules/gtdbtk/classifywf/**

@@ -1299,6 +1311,10 @@ peddy:
   - modules/peddy/**
   - tests/modules/peddy/**

+phantompeakqualtools:
+  - modules/phantompeakqualtools/**
+  - tests/modules/phantompeakqualtools/**
+
 phyloflash:
   - modules/phyloflash/**
   - tests/modules/phyloflash/**

@@ -1395,6 +1411,10 @@ preseq/lcextrap:
   - modules/preseq/lcextrap/**
   - tests/modules/preseq/lcextrap/**

+prinseqplusplus:
+  - modules/prinseqplusplus/**
+  - tests/modules/prinseqplusplus/**
+
 prodigal:
   - modules/prodigal/**
   - tests/modules/prodigal/**

@@ -1591,6 +1611,10 @@ seqkit/split2:
   - modules/seqkit/split2/**
   - tests/modules/seqkit/split2/**

+seqkit/stats:
+  - modules/seqkit/stats/**
+  - tests/modules/seqkit/stats/**
+
 seqsero2:
   - modules/seqsero2/**
   - tests/modules/seqsero2/**

View file

@@ -3,10 +3,10 @@
   tags:
     - adapterremoval
   files:
-    - path: output/adapterremoval/test.discarded.gz
-    - path: output/adapterremoval/test.log
+    - path: output/adapterremoval/test.discarded.fastq.gz
+    - path: output/adapterremoval/test.settings
       md5sum: 2fd3d5d703b63ba33a83021fccf25f77
-    - path: output/adapterremoval/test.truncated.gz
+    - path: output/adapterremoval/test.truncated.fastq.gz
       md5sum: 62139afee94defad5b83bdd0b8475a1f
     - path: output/adapterremoval/versions.yml
       md5sum: ac5b46719719b7ee62739530b80869fc

@@ -16,12 +16,12 @@
   tags:
     - adapterremoval
   files:
-    - path: output/adapterremoval/test.discarded.gz
-    - path: output/adapterremoval/test.log
+    - path: output/adapterremoval/test.discarded.fastq.gz
+    - path: output/adapterremoval/test.settings
       md5sum: b8a451d3981b327f3fdb44f40ba2d6d1
-    - path: output/adapterremoval/test.pair1.truncated.gz
+    - path: output/adapterremoval/test.pair1.truncated.fastq.gz
       md5sum: 294a6277f0139bd597e57c6fa31f39c7
-    - path: output/adapterremoval/test.pair2.truncated.gz
+    - path: output/adapterremoval/test.pair2.truncated.fastq.gz
       md5sum: de7b38e2c881bced8671acb1ab452d78
     - path: output/adapterremoval/versions.yml
       md5sum: fa621c887897da5a379c719399c17db7

@@ -31,15 +31,15 @@
   tags:
     - adapterremoval
   files:
-    - path: output/adapterremoval/test.collapsed.gz
+    - path: output/adapterremoval/test.collapsed.fastq.gz
       md5sum: ff956de3532599a56c3efe5369f0953f
-    - path: output/adapterremoval/test.collapsed.truncated.gz
-    - path: output/adapterremoval/test.discarded.gz
-    - path: output/adapterremoval/test.log
+    - path: output/adapterremoval/test.collapsed.truncated.fastq.gz
+    - path: output/adapterremoval/test.discarded.fastq.gz
+    - path: output/adapterremoval/test.settings
       md5sum: 7f0b2328152226e46101a535cce718b3
-    - path: output/adapterremoval/test.pair1.truncated.gz
+    - path: output/adapterremoval/test.pair1.truncated.fastq.gz
       md5sum: 683be19bc1c83008944b6b719bfa34e1
-    - path: output/adapterremoval/test.pair2.truncated.gz
+    - path: output/adapterremoval/test.pair2.truncated.fastq.gz
       md5sum: e6548fe061f3ef86368b26da930174d0
     - path: output/adapterremoval/versions.yml
       md5sum: 78f589bb313c8da0147ca8ce77d7f3bf

View file

@@ -5,8 +5,8 @@
     - biobambam
   files:
     - path: output/biobambam/test.bam
-      md5sum: 1cf7f957eb20b4ace9f10d0cf0a0649a
+      md5sum: 603edff09029096ddf2bb8a3f12d7aa7
     - path: output/biobambam/test.metrics.txt
       md5sum: 30d6e7d90bb5df46329d4bc0144ce927
     - path: output/biobambam/versions.yml
-      md5sum: 0d6f3137ed4515333d73c779f2c24445
+      md5sum: dfdf2b084655d124acac0bfb4eda86cc

View file

@@ -0,0 +1,15 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { BIOBAMBAM_BAMSORMADUP } from '../../../../modules/biobambam/bamsormadup/main.nf'
workflow test_biobambam_bamsormadup {
input = [
[ id:'test', single_end:false ], // meta map
[file(params.test_data['sarscov2']['illumina']['test_paired_end_bam'], checkIfExists: true), file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_bam'], checkIfExists: true)],
]
BIOBAMBAM_BAMSORMADUP ( input, [] )
}

View file

@@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}
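
The closure derives the publish directory from the fully qualified process name; a small Groovy sketch with a hypothetical name:

    def name = 'TEST_BIOBAMBAM:BIOBAMBAM_BAMSORMADUP'
    def dir  = name.tokenize(':')[-1].tokenize('_')[0].toLowerCase()
    assert dir == 'biobambam'   // files land in ${params.outdir}/biobambam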

View file

@@ -0,0 +1,11 @@
- name: biobambam bamsormadup test_biobambam_bamsormadup
command: nextflow run tests/modules/biobambam/bamsormadup -entry test_biobambam_bamsormadup -c tests/config/nextflow.config
tags:
- biobambam/bamsormadup
- biobambam
files:
- path: output/biobambam/test.bam
md5sum: 243a77fb0642fd46bb16a4d3432d19dc
- path: output/biobambam/test.metrics.txt
md5sum: 1721879bea1f3888ecd33b35e6ee0e72
- path: output/biobambam/versions.yml

View file

@@ -7,6 +7,14 @@
     - path: output/cat/test.fasta
       md5sum: f44b33a0e441ad58b2d3700270e2dbe2

+- name: cat unzipped unzipped stub
+  command: nextflow run ./tests/modules/cat/cat -entry test_cat_unzipped_unzipped -c ./tests/config/nextflow.config -c ./tests/modules/cat/cat/nextflow.config -stub-run
+  tags:
+    - cat
+    - cat/cat
+  files:
+    - path: output/cat/test.fasta
+
 - name: cat zipped zipped
   command: nextflow run ./tests/modules/cat/cat -entry test_cat_zipped_zipped -c ./tests/config/nextflow.config -c ./tests/modules/cat/cat/nextflow.config
   tags:

@@ -15,6 +23,14 @@
   files:
     - path: output/cat/test.gz

+- name: cat zipped zipped stub
+  command: nextflow run ./tests/modules/cat/cat -entry test_cat_zipped_zipped -c ./tests/config/nextflow.config -c ./tests/modules/cat/cat/nextflow.config -stub-run
+  tags:
+    - cat
+    - cat/cat
+  files:
+    - path: output/cat/test.gz
+
 - name: cat zipped unzipped
   command: nextflow run ./tests/modules/cat/cat -entry test_cat_zipped_unzipped -c ./tests/config/nextflow.config -c ./tests/modules/cat/cat/nextflow.config
   tags:

@@ -24,6 +40,14 @@
     - path: output/cat/cat.txt
       md5sum: c439d3b60e7bc03e8802a451a0d9a5d9

+- name: cat zipped unzipped stub
+  command: nextflow run ./tests/modules/cat/cat -entry test_cat_zipped_unzipped -c ./tests/config/nextflow.config -c ./tests/modules/cat/cat/nextflow.config -stub-run
+  tags:
+    - cat
+    - cat/cat
+  files:
+    - path: output/cat/cat.txt
+
 - name: cat unzipped zipped
   command: nextflow run ./tests/modules/cat/cat -entry test_cat_unzipped_zipped -c ./tests/config/nextflow.config -c ./tests/modules/cat/cat/nextflow.config
   tags:

@@ -32,6 +56,14 @@
   files:
     - path: output/cat/cat.txt.gz

+- name: cat unzipped zipped stub
+  command: nextflow run ./tests/modules/cat/cat -entry test_cat_unzipped_zipped -c ./tests/config/nextflow.config -c ./tests/modules/cat/cat/nextflow.config -stub-run
+  tags:
+    - cat
+    - cat/cat
+  files:
+    - path: output/cat/cat.txt.gz
+
 - name: cat one file unzipped zipped
   command: nextflow run ./tests/modules/cat/cat -entry test_cat_one_file_unzipped_zipped -c ./tests/config/nextflow.config -c ./tests/modules/cat/cat/nextflow.config
   tags:

@@ -39,3 +71,11 @@
     - cat/cat
   files:
     - path: output/cat/cat.txt.gz
+
+- name: cat one file unzipped zipped stub
+  command: nextflow run ./tests/modules/cat/cat -entry test_cat_one_file_unzipped_zipped -c ./tests/config/nextflow.config -c ./tests/modules/cat/cat/nextflow.config -stub-run
+  tags:
+    - cat
+    - cat/cat
+  files:
+    - path: output/cat/cat.txt.gz

View file

@@ -0,0 +1,37 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { UNTAR } from '../../../../modules/untar/main.nf'
include { CENTRIFUGE_CENTRIFUGE } from '../../../../modules/centrifuge/centrifuge/main.nf'
workflow test_centrifuge_centrifuge_single_end {
input = [ [ id:'test', single_end:true ], // meta map
[ file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true) ]
]
db = [ [], file('https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/delete_me/minigut_cf.tar.gz', checkIfExists: true) ]
save_unaligned = true
save_aligned = false
sam_format = false
UNTAR ( db )
CENTRIFUGE_CENTRIFUGE ( input, UNTAR.out.untar.map{ it[1] }, save_unaligned, save_aligned, sam_format )
}
workflow test_centrifuge_centrifuge_paired_end {
input = [ [ id:'test', single_end:false ], // meta map
[ file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_2_fastq_gz'], checkIfExists: true) ]
]
db = [ [], file('https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/delete_me/minigut_cf.tar.gz', checkIfExists: true) ]
//db_name = "minigut_cf"
save_unaligned = true
save_aligned = false
sam_format = false
UNTAR ( db )
CENTRIFUGE_CENTRIFUGE ( input, UNTAR.out.untar.map{ it[1] }, save_unaligned, save_aligned, sam_format )
}
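A note on the channel wiring in both workflows: UNTAR emits tuples of the form [ meta, extracted_path ], so .map{ it[1] } strips the meta element and hands only the untarred database directory to CENTRIFUGE_CENTRIFUGE. A self-contained sketch of the idiom (channel contents are illustrative):

    workflow demo_strip_meta { // hypothetical demo, not part of the test file
        Channel
            .of( [ [ id:'db' ], file('minigut_cf') ] ) // [ meta, path ]
            .map { it[1] }                             // keep only the path
            .view()
    }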


@@ -1,20 +1,20 @@
- name: centrifuge centrifuge test_centrifuge_centrifuge_single_end
command: nextflow run tests/modules/centrifuge/centrifuge -entry test_centrifuge_centrifuge_single_end -c tests/config/nextflow.config
tags:
- centrifuge
- centrifuge/centrifuge
files:
- path: output/centrifuge/test.report.txt
- path: output/centrifuge/test.results.txt
- path: output/centrifuge/test.unmapped.fastq.gz
- path: output/centrifuge/versions.yml
- name: centrifuge centrifuge test_centrifuge_centrifuge_paired_end
command: nextflow run tests/modules/centrifuge/centrifuge -entry test_centrifuge_centrifuge_paired_end -c tests/config/nextflow.config
tags:
- centrifuge
- centrifuge/centrifuge
files:
- path: output/centrifuge/test.report.txt
- path: output/centrifuge/test.results.txt
- path: output/centrifuge/test.unmapped.fastq.1.gz


@@ -1,33 +0,0 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { CENTRIFUGE } from '../../../modules/centrifuge/main.nf'
workflow test_centrifuge_single_end {
input = [ [ id:'test', single_end:true ], // meta map
[ file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true) ]
]
db = file("https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/delete_me/minigut_cf.tar.gz", checkIfExists: true)
save_unaligned = true
save_aligned = false
sam_format = false
CENTRIFUGE ( input, db, save_unaligned, save_aligned, sam_format )
}
workflow test_centrifuge_paired_end {
input = [ [ id:'test', single_end:false ], // meta map
[ file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_2_fastq_gz'], checkIfExists: true) ]
]
db = file("https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/delete_me/minigut_cf.tar.gz", checkIfExists: true)
save_unaligned = true
save_aligned = false
sam_format = false
CENTRIFUGE ( input, db, save_unaligned, save_aligned, sam_format )
}


@@ -4,7 +4,17 @@
- cnvpytor
- cnvpytor/callcnvs
files:
- path: output/cnvpytor/test.tsv
md5sum: d41d8cd98f00b204e9800998ecf8427e
- path: output/cnvpytor/versions.yml
md5sum: 0bea08a253fcb2ff0ff79b99df77b9fa
- name: cnvpytor callcnvs test_cnvpytor_callcnvs stub
command: nextflow run tests/modules/cnvpytor/callcnvs -entry test_cnvpytor_callcnvs -c tests/config/nextflow.config -stub-run
tags:
- cnvpytor
- cnvpytor/callcnvs
files:
- path: output/cnvpytor/test.tsv
- path: output/cnvpytor/versions.yml
md5sum: 0bea08a253fcb2ff0ff79b99df77b9fa


@@ -7,4 +7,14 @@
- path: output/cnvpytor/test.pytor
md5sum: aa03a8fa15b39f77816705a48e10312a
- path: output/cnvpytor/versions.yml
md5sum: 0f4d75c4f3a3eb26c22616d12b0b78b2
- name: cnvpytor histogram test_cnvpytor_histogram stub
command: nextflow run tests/modules/cnvpytor/histogram -entry test_cnvpytor_histogram -c tests/config/nextflow.config -stub-run
tags:
- cnvpytor
- cnvpytor/histogram
files:
- path: output/cnvpytor/test.pytor
- path: output/cnvpytor/versions.yml
md5sum: 0f4d75c4f3a3eb26c22616d12b0b78b2


@@ -8,5 +8,5 @@ process {
}
params {
cnvpytor_chr = null // specifies chromosome name(s) the same way as they are described in the sam/bam/cram header e.g. '1 2' or 'chr1 chr2'.
}
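For illustration, a user could override this null default at run time with a config such as (chromosome names below are hypothetical and must match the alignment header):

    params {
        cnvpytor_chr = 'chr21 chr22' // space-separated, spelled exactly as in the BAM/CRAM header
    }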


@@ -0,0 +1,39 @@
- name: cnvpytor importreaddepth test_cnvpytor_importreaddepth
command: nextflow run tests/modules/cnvpytor/importreaddepth -entry test_cnvpytor_importreaddepth -c tests/config/nextflow.config
tags:
- cnvpytor
- cnvpytor/importreaddepth
files:
- path: output/cnvpytor/test.pytor
- path: output/cnvpytor/versions.yml
md5sum: 5834495324c08a37f3fd73ccdd881dc8
- name: cnvpytor importreaddepth test_cnvpytor_importreaddepth stub
command: nextflow run tests/modules/cnvpytor/importreaddepth -entry test_cnvpytor_importreaddepth -c tests/config/nextflow.config -stub-run
tags:
- cnvpytor
- cnvpytor/importreaddepth
files:
- path: output/cnvpytor/test.pytor
- path: output/cnvpytor/versions.yml
md5sum: 5834495324c08a37f3fd73ccdd881dc8
- name: cnvpytor importreaddepth test_cnvpytor_importreaddepth_cram
command: nextflow run tests/modules/cnvpytor/importreaddepth -entry test_cnvpytor_importreaddepth_cram -c tests/config/nextflow.config
tags:
- cnvpytor
- cnvpytor/importreaddepth
files:
- path: output/cnvpytor/test.pytor
- path: output/cnvpytor/versions.yml
md5sum: dfa0afb0982d985b96d1633f71ebb82a
- name: cnvpytor importreaddepth test_cnvpytor_importreaddepth_cram stub
command: nextflow run tests/modules/cnvpytor/importreaddepth -entry test_cnvpytor_importreaddepth_cram -c tests/config/nextflow.config -stub-run
tags:
- cnvpytor
- cnvpytor/importreaddepth
files:
- path: output/cnvpytor/test.pytor
- path: output/cnvpytor/versions.yml
md5sum: dfa0afb0982d985b96d1633f71ebb82a


@@ -7,4 +7,14 @@
- path: output/cnvpytor/test.pytor
md5sum: aa03a8fa15b39f77816705a48e10312a
- path: output/cnvpytor/versions.yml
md5sum: 7fd6ec952a316463bcd324f176b46b64
- name: cnvpytor partition test_cnvpytor_partition stub
command: nextflow run tests/modules/cnvpytor/partition -entry test_cnvpytor_partition -c tests/config/nextflow.config -stub-run
tags:
- cnvpytor
- cnvpytor/partition
files:
- path: output/cnvpytor/test.pytor
- path: output/cnvpytor/versions.yml
md5sum: 7fd6ec952a316463bcd324f176b46b64


@@ -3,7 +3,7 @@ nextflow.enable.dsl = 2
include { METABAT2_METABAT2 } from '../../../../modules/metabat2/metabat2/main.nf'
include { METABAT2_JGISUMMARIZEBAMCONTIGDEPTHS } from '../../../../modules/metabat2/jgisummarizebamcontigdepths/main.nf'
include { DASTOOL_FASTATOCONTIG2BIN } from '../../../../modules/dastool/fastatocontig2bin/main.nf'
include { DASTOOL_DASTOOL } from '../../../../modules/dastool/dastool/main.nf'
workflow test_dastool_dastool {
@@ -21,13 +21,13 @@ workflow test_dastool_dastool {
METABAT2_METABAT2 ( input_metabat2 )
DASTOOL_FASTATOCONTIG2BIN ( METABAT2_METABAT2.out.fasta.collect(), "fa")
Channel.of([ [ id:'test', single_end:false ], // meta map
file(params.test_data['bacteroides_fragilis']['genome']['genome_fna_gz'], checkIfExists: true)])
.join( DASTOOL_FASTATOCONTIG2BIN.out.fastatocontig2bin )
.set {input_dastool}
DASTOOL_DASTOOL ( input_dastool, [], [] )
}


@@ -1,29 +1,28 @@
- name: dastool dastool test_dastool_dastool
command: nextflow run tests/modules/dastool/dastool -entry test_dastool_dastool -c tests/config/nextflow.config
tags:
- dastool/dastool
- dastool
files:
- path: output/dastool/test.seqlength
md5sum: b815a5811008c36808a59b1d0dcfab24
- path: output/dastool/test.tsv
md5sum: 6e46c0be14dded7cb13af38f54feea47
- path: output/dastool/test_DASTool.log
- path: output/dastool/test_DASTool_contig2bin.tsv
md5sum: 6e46c0be14dded7cb13af38f54feea47
- path: output/dastool/test_DASTool_summary.tsv
md5sum: ab9dd3709a59a69bc66030b9e0ff3d5b
- path: output/dastool/test_proteins.faa
- path: output/dastool/test_proteins.faa.all.b6
md5sum: 39c11237ef22ac73109aaac267e185d0
- path: output/dastool/test_proteins.faa.archaea.scg
md5sum: e79d82eecee25821d1658ea4f082601d
- path: output/dastool/test_proteins.faa.bacteria.scg
md5sum: 8132cfb17cf398d41c036ead55c96ffe
- path: output/dastool/test_proteins.faa.findSCG.b6
md5sum: 48e90e12cd6c88d00608777dbc48a82a
- path: output/dastool/test_proteins.faa.scg.candidates.faa
md5sum: d94b7bed0f8aa9cf2824d72c548c537c
- path: output/dastool/versions.yml
md5sum: 004e04c6a38652df2e0c59c44e29c9de


@@ -0,0 +1,48 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { GUNZIP } from '../../../../modules/gunzip/main.nf'
include { METABAT2_METABAT2 } from '../../../../modules/metabat2/metabat2/main.nf'
include { METABAT2_JGISUMMARIZEBAMCONTIGDEPTHS } from '../../../../modules/metabat2/jgisummarizebamcontigdepths/main.nf'
include { DASTOOL_FASTATOCONTIG2BIN } from '../../../../modules/dastool/fastatocontig2bin/main.nf'
workflow test_dastool_fastatocontig2bin {
input_depth = [ [ id:'test', single_end:false ], // meta map
file(params.test_data['bacteroides_fragilis']['illumina']['test1_paired_end_sorted_bam'], checkIfExists: true),
file(params.test_data['bacteroides_fragilis']['illumina']['test1_paired_end_sorted_bam_bai'], checkIfExists: true) ]
METABAT2_JGISUMMARIZEBAMCONTIGDEPTHS ( input_depth )
Channel.fromPath(params.test_data['bacteroides_fragilis']['genome']['genome_fna_gz'], checkIfExists: true)
.map { it -> [[ id:'test', single_end:false ], it] }
.join(METABAT2_JGISUMMARIZEBAMCONTIGDEPTHS.out.depth)
.set { input_metabat2 }
METABAT2_METABAT2 ( input_metabat2 )
DASTOOL_FASTATOCONTIG2BIN ( METABAT2_METABAT2.out.fasta.collect(), "fa")
}
workflow test_dastool_fastatocontig2bin_ungzipped {
input_depth = [ [ id:'test', single_end:false ], // meta map
file(params.test_data['bacteroides_fragilis']['illumina']['test1_paired_end_sorted_bam'], checkIfExists: true),
file(params.test_data['bacteroides_fragilis']['illumina']['test1_paired_end_sorted_bam_bai'], checkIfExists: true) ]
METABAT2_JGISUMMARIZEBAMCONTIGDEPTHS ( input_depth )
Channel.fromPath(params.test_data['bacteroides_fragilis']['genome']['genome_fna_gz'], checkIfExists: true)
.map { it -> [[ id:'test', single_end:false ], it] }
.join(METABAT2_JGISUMMARIZEBAMCONTIGDEPTHS.out.depth)
.set { input_metabat2 }
METABAT2_METABAT2 ( input_metabat2 )
// TODO test unzipped input files
ch_input_2_fastatocontig2bin = GUNZIP( METABAT2_METABAT2.out.fasta ).gunzip
DASTOOL_FASTATOCONTIG2BIN ( ch_input_2_fastatocontig2bin, "fa")
}
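Note on the last two statements above: the nf-core GUNZIP module emits a channel named gunzip carrying [ meta, unzipped_file ], so calling the process inline and selecting .gunzip keeps the meta pairing intact for DASTOOL_FASTATOCONTIG2BIN. The equivalent explicit form would be (sketch):

    // GUNZIP ( METABAT2_METABAT2.out.fasta )
    // DASTOOL_FASTATOCONTIG2BIN ( GUNZIP.out.gunzip, "fa" )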


@@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}
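This publishDir closure, repeated in the test configs below, derives the output directory from the fully qualified process name. Traced for a hypothetical name (each comment shows the intermediate value):

    // assuming task.process == 'TEST_DASTOOL:DASTOOL_FASTATOCONTIG2BIN'
    // tokenize(':')[-1]  -> 'DASTOOL_FASTATOCONTIG2BIN' (last segment of the workflow path)
    // .tokenize('_')[0]  -> 'DASTOOL'                   (tool name before the first underscore)
    // .toLowerCase()     -> 'dastool', so files are published to "${params.outdir}/dastool"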


@@ -0,0 +1,20 @@
- name: dastool fastatocontig2bin test_dastool_fastatocontig2bin
command: nextflow run tests/modules/dastool/fastatocontig2bin -entry test_dastool_fastatocontig2bin -c tests/config/nextflow.config
tags:
- dastool
- dastool/fastatocontig2bin
files:
- path: output/dastool/test.tsv
md5sum: 6e46c0be14dded7cb13af38f54feea47
- path: output/dastool/versions.yml
md5sum: ff4b6f14bee4548bf09b5e602c306595
- name: dastool fastatocontig2bin test_dastool_fastatocontig2bin_ungzipped
command: nextflow run tests/modules/dastool/fastatocontig2bin -entry test_dastool_fastatocontig2bin_ungzipped -c tests/config/nextflow.config
tags:
- dastool
- dastool/fastatocontig2bin
files:
- path: output/dastool/test.tsv
md5sum: 6e46c0be14dded7cb13af38f54feea47
- path: output/dastool/versions.yml


@@ -0,0 +1,15 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { GSTAMA_POLYACLEANUP } from '../../../../modules/gstama/polyacleanup/main.nf'
workflow test_gstama_polyacleanup {
input = [
[ id:'test' ], // meta map
file(params.test_data['homo_sapiens']['genome']['transcriptome_fasta'], checkIfExists: true)
]
GSTAMA_POLYACLEANUP ( input )
}


@@ -0,0 +1,6 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
ext.prefix = { "${meta.id}_tama" }
}


@@ -0,0 +1,14 @@
- name: gstama polyacleanup test_gstama_polyacleanup
command: nextflow run tests/modules/gstama/polyacleanup -entry test_gstama_polyacleanup -c tests/config/nextflow.config
tags:
- gstama
- gstama/polyacleanup
files:
- path: output/gstama/test_tama.fa.gz
md5sum: 9c768387478e5f966a42c369c0270b09
- path: output/gstama/test_tama_polya_flnc_report.txt.gz
md5sum: fe3606979ed11538aacd83159f4cff03
- path: output/gstama/test_tama_tails.fa.gz
md5sum: ba21256c0afe0bda71b3ee66b4c761bf
- path: output/gstama/versions.yml
md5sum: 07ebb812ae13a350d955fab7600b2542


@@ -7,28 +7,30 @@ include { MANTA_GERMLINE } from '../../../../modules/manta/germline/main.nf'
workflow test_manta_germline {
input = [
[ id:'test'], // meta map
[ file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_cram'], checkIfExists: true)],
[ file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_cram_crai'], checkIfExists: true)]
]
fasta = file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true)
fai = file(params.test_data['homo_sapiens']['genome']['genome_fasta_fai'], checkIfExists: true)
bed = [[],[]]
MANTA_GERMLINE ( input, fasta, fai, bed )
}
workflow test_manta_germline_target_bed {
input = [
[ id:'test'], // meta map
[ file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_cram'], checkIfExists: true)],
[ file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_cram_crai'], checkIfExists: true)]
]
fasta = file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true)
fai = file(params.test_data['homo_sapiens']['genome']['genome_fasta_fai'], checkIfExists: true)
bed = [
file(params.test_data['homo_sapiens']['genome']['genome_bed_gz'], checkIfExists: true),
file(params.test_data['homo_sapiens']['genome']['genome_bed_gz_tbi'], checkIfExists: true),
]
MANTA_GERMLINE ( input, fasta, fai, bed )
}
workflow test_manta_germline_target_bed_jointcalling {
@@ -37,12 +39,14 @@ workflow test_manta_germline_target_bed_jointcalling {
[file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_cram'], checkIfExists: true),
file(params.test_data['homo_sapiens']['illumina']['test2_paired_end_sorted_cram'], checkIfExists: true)],
[file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_cram_crai'], checkIfExists: true),
file(params.test_data['homo_sapiens']['illumina']['test2_paired_end_sorted_cram_crai'], checkIfExists: true),]
]
fasta = file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true)
fai = file(params.test_data['homo_sapiens']['genome']['genome_fasta_fai'], checkIfExists: true)
bed = [
file(params.test_data['homo_sapiens']['genome']['genome_bed_gz'], checkIfExists: true),
file(params.test_data['homo_sapiens']['genome']['genome_bed_gz_tbi'], checkIfExists: true),
]
MANTA_GERMLINE ( input, fasta, fai, bed )
}
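The restructuring above does two things: reads and their indexes are now wrapped in lists so several CRAM/CRAI pairs can be passed for joint calling, and the target BED moved out of the per-sample input into a separate two-element bed argument, with [[],[]] as the empty placeholder when no target region is supplied. The resulting argument shapes (illustrative):

    // input: [ meta, [ cram1, cram2, ... ], [ crai1, crai2, ... ] ]
    // bed:   [ bed_gz, bed_gz_tbi ]  or  [ [], [] ] when no target BED is used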


@@ -0,0 +1,25 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { PHANTOMPEAKQUALTOOLS } from '../../../modules/phantompeakqualtools/main.nf'
workflow test_phantompeakqualtools_single_end {
input = [
[ id:'test', single_end:true ], // meta map
file(params.test_data['sarscov2']['illumina']['test_single_end_bam'], checkIfExists: true)
]
PHANTOMPEAKQUALTOOLS ( input )
}
workflow test_phantompeakqualtools_paired_end {
input = [
[ id:'test', single_end:false ], // meta map
file(params.test_data['sarscov2']['illumina']['test_paired_end_bam'], checkIfExists: true)
]
PHANTOMPEAKQUALTOOLS ( input )
}


@@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}


@@ -0,0 +1,23 @@
- name: phantompeakqualtools test_phantompeakqualtools_single_end
command: nextflow run tests/modules/phantompeakqualtools -entry test_phantompeakqualtools_single_end -c tests/config/nextflow.config
tags:
- phantompeakqualtools
files:
- path: output/phantompeakqualtools/test.spp.Rdata
- path: output/phantompeakqualtools/test.spp.out
md5sum: b01d976506b6fe45b66c821b1e8a1d15
- path: output/phantompeakqualtools/test.spp.pdf
- path: output/phantompeakqualtools/versions.yml
md5sum: 6c2ede1aac4c574e3c72fbe09f15c03f
- name: phantompeakqualtools test_phantompeakqualtools_paired_end
command: nextflow run tests/modules/phantompeakqualtools -entry test_phantompeakqualtools_paired_end -c tests/config/nextflow.config
tags:
- phantompeakqualtools
files:
- path: output/phantompeakqualtools/test.spp.Rdata
- path: output/phantompeakqualtools/test.spp.out
md5sum: eed46e75eab119224f397a7a8b5924e6
- path: output/phantompeakqualtools/test.spp.pdf
- path: output/phantompeakqualtools/versions.yml
md5sum: 383d2dd583fcb40451bde0d3840bdb72


@@ -0,0 +1,24 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { PRINSEQPLUSPLUS } from '../../../modules/prinseqplusplus/main.nf'
workflow test_prinseqplusplus_single_end {
input = [ [ id:'test', single_end:true ], // meta map
[ file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true) ]
]
PRINSEQPLUSPLUS ( input )
}
workflow test_prinseqplusplus_paired_end {
input = [ [ id:'test', single_end:false ], // meta map
[ file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_2_fastq_gz'], checkIfExists: true) ]
]
PRINSEQPLUSPLUS ( input )
}


@@ -0,0 +1,9 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
withName: PRINSEQPLUSPLUS {
ext.args = "-lc_entropy=0.8"
}
}
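ext.args is the nf-core convention for injecting optional tool arguments into a module without changing its code; inside the module's script block it is typically consumed as in this sketch (illustrative, not the prinseqplusplus module's exact script):

    // def args = task.ext.args ?: ''  // becomes '-lc_entropy=0.8' with the config above
    // prinseq++ is then invoked with $args appended to its command line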


@@ -0,0 +1,27 @@
- name: prinseqplusplus test_prinseqplusplus_single_end
command: nextflow run tests/modules/prinseqplusplus -entry test_prinseqplusplus_single_end -c tests/config/nextflow.config
tags:
- prinseqplusplus
files:
- path: output/prinseqplusplus/test.log
contains:
- "reads removed by -lc_entropy"
- path: output/prinseqplusplus/test_bad_out.fastq.gz
- path: output/prinseqplusplus/test_good_out.fastq.gz
- path: output/prinseqplusplus/versions.yml
- name: prinseqplusplus test_prinseqplusplus_paired_end
command: nextflow run tests/modules/prinseqplusplus -entry test_prinseqplusplus_paired_end -c tests/config/nextflow.config
tags:
- prinseqplusplus
files:
- path: output/prinseqplusplus/test.log
contains:
- "reads removed by -lc_entropy"
- path: output/prinseqplusplus/test_bad_out_R1.fastq.gz
- path: output/prinseqplusplus/test_bad_out_R2.fastq.gz
- path: output/prinseqplusplus/test_good_out_R1.fastq.gz
- path: output/prinseqplusplus/test_good_out_R2.fastq.gz
- path: output/prinseqplusplus/test_single_out_R1.fastq.gz
- path: output/prinseqplusplus/test_single_out_R2.fastq.gz
- path: output/prinseqplusplus/versions.yml


@@ -0,0 +1,58 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { SEQKIT_STATS } from '../../../../modules/seqkit/stats/main.nf'
workflow test_seqkit_stats_single_end {
input = [
[ id:'test', single_end:true ], // meta map
file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true)
]
SEQKIT_STATS ( input )
}
workflow test_seqkit_stats_paired_end {
input = [
[ id:'test', single_end:false ], // meta map
[
file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_2_fastq_gz'], checkIfExists: true)
]
]
SEQKIT_STATS ( input )
}
workflow test_seqkit_stats_nanopore {
input = [
[ id:'test', single_end:false ], // meta map
file(params.test_data['sarscov2']['nanopore']['test_fastq_gz'], checkIfExists: true),
]
SEQKIT_STATS ( input )
}
workflow test_seqkit_stats_genome_fasta {
input = [
[ id:'test', single_end:false ], // meta map
file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true),
]
SEQKIT_STATS ( input )
}
workflow test_seqkit_stats_transcriptome_fasta {
input = [
[ id:'test', single_end:false ], // meta map
file(params.test_data['sarscov2']['genome']['transcriptome_fasta'], checkIfExists: true),
]
SEQKIT_STATS ( input )
}


@@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}


@@ -0,0 +1,54 @@
- name: seqkit stats test_seqkit_stats_single_end
command: nextflow run tests/modules/seqkit/stats -entry test_seqkit_stats_single_end -c tests/config/nextflow.config
tags:
- seqkit/stats
- seqkit
files:
- path: output/seqkit/test.tsv
md5sum: e23227d089a7e04b0ec0cb547c4aadff
- path: output/seqkit/versions.yml
md5sum: d67f0c16feb9df77b11f6c91bbdf9926
- name: seqkit stats test_seqkit_stats_paired_end
command: nextflow run tests/modules/seqkit/stats -entry test_seqkit_stats_paired_end -c tests/config/nextflow.config
tags:
- seqkit/stats
- seqkit
files:
- path: output/seqkit/test.tsv
md5sum: 9de20dc39fb01285e3f0c382fda9db52
- path: output/seqkit/versions.yml
md5sum: bd8881933b953d07f2600e2e6a88ebf3
- name: seqkit stats test_seqkit_stats_nanopore
command: nextflow run tests/modules/seqkit/stats -entry test_seqkit_stats_nanopore -c tests/config/nextflow.config
tags:
- seqkit/stats
- seqkit
files:
- path: output/seqkit/test.tsv
md5sum: 5da1709eb5ae64fa3b2d624bffe2e7aa
- path: output/seqkit/versions.yml
md5sum: 565632701fbe048f7ba99f1865bd48ca
- name: seqkit stats test_seqkit_stats_genome_fasta
command: nextflow run tests/modules/seqkit/stats -entry test_seqkit_stats_genome_fasta -c tests/config/nextflow.config
tags:
- seqkit/stats
- seqkit
files:
- path: output/seqkit/test.tsv
md5sum: f64489767a4e769539ef3faf83260184
- path: output/seqkit/versions.yml
md5sum: 782fcdeaa922c8bb532ffa5808849d87
- name: seqkit stats test_seqkit_stats_transcriptome_fasta
command: nextflow run tests/modules/seqkit/stats -entry test_seqkit_stats_transcriptome_fasta -c tests/config/nextflow.config
tags:
- seqkit/stats
- seqkit
files:
- path: output/seqkit/test.tsv
md5sum: fbb975b665a08c8862fcd1268613a945
- path: output/seqkit/versions.yml
md5sum: db99b016d986d26102ec398264a58410


@@ -0,0 +1,24 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { HOMER_GROSEQ as HOMER_GROSEQ_BAM
HOMER_GROSEQ as HOMER_GROSEQ_BED } from '../../../../../subworkflows/nf-core/homer/groseq/main'
workflow test_homer_groseq_bam {
def input = []
input = [[ id: 'test' ],
[ file(params.test_data['sarscov2']['illumina']['test_paired_end_sorted_bam'], checkIfExists: true)]]
def fasta = [ file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true) ]
HOMER_GROSEQ_BAM ( input, fasta )
}
workflow test_homer_groseq_bed {
def input = []
input = [[ id: 'test' ],
[ file(params.test_data['sarscov2']['genome']['test_bed'], checkIfExists: true)]]
def fasta = [ file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true) ]
HOMER_GROSEQ_BED ( input, fasta )
}
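Note: the include ... as ... aliasing at the top instantiates the same HOMER_GROSEQ subworkflow twice in one script, so the BAM and BED entry points get independently configurable copies. The general pattern (illustrative names):

    // include { MYWF as MYWF_A; MYWF as MYWF_B } from './subworkflows/mywf/main'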


@@ -0,0 +1,9 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
withName: '.*:HOMER_GROSEQ_BED:HOMER_MAKETAGDIRECTORY' {
ext.args = "-checkGC -format bed"
}
}
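The quoted withName pattern is a regular expression matched against the fully qualified process name, so '.*:HOMER_GROSEQ_BED:HOMER_MAKETAGDIRECTORY' selects only the HOMER_MAKETAGDIRECTORY instance nested under the BED-aliased subworkflow; for example, a hypothetical fully qualified name 'TEST_HOMER_GROSEQ:HOMER_GROSEQ_BED:HOMER_MAKETAGDIRECTORY' matches, while the BAM instance keeps the default arguments.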


@@ -0,0 +1,27 @@
- name: subworkflow homer_groseq bam
command: nextflow run ./tests/subworkflows/nf-core/homer/groseq/ -entry test_homer_groseq_bam -c tests/config/nextflow.config -c tests/subworkflows/nf-core/homer/groseq/nextflow.config
tags:
- homer
files:
- path: output/homer/test.bed
md5sum: 8d40034dfe22c5cf973071aa1e8d3617
- path: output/homer/test.bedGraph.gz
md5sum: de2b2f8ab90a909b8bfbe755bdaba407
- path: output/homer/test.peaks.txt
md5sum: 8d40034dfe22c5cf973071aa1e8d3617
- path: output/homer/versions.yml
md5sum: c85dee03f1afabe406a87743a4c5506d
- name: subworkflow homer_groseq bed
command: nextflow run ./tests/subworkflows/nf-core/homer/groseq/ -entry test_homer_groseq_bed -c tests/config/nextflow.config -c tests/subworkflows/nf-core/homer/groseq/nextflow.config
tags:
- homer
files:
- path: output/homer/test.bed
md5sum: 25e8b64946012d1c4567a04062e90fae
- path: output/homer/test.bedGraph.gz
md5sum: 2d2d1c2d3242ff74c7a922695accb9d2
- path: output/homer/test.peaks.txt
md5sum: 25e8b64946012d1c4567a04062e90fae
- path: output/homer/versions.yml
md5sum: c9b5f1248d28c216b000cba8da738455