
Merge branch 'dev' of https://github.com/mjamy/taxprofiler into add-krakenuniq-module

Mahwash Jamy 2022-10-31 15:36:57 +00:00
commit 3147a80a56
29 changed files with 648 additions and 24 deletions

@@ -23,6 +23,7 @@ jobs:
         - "21.10.3"
         - "latest-everything"
       parameters:
+        - "--preprocessing_qc_tool falco"
         - "--perform_longread_qc false"
         - "--perform_shortread_qc false"
         - "--shortread_qc_tool fastp"

@@ -36,6 +36,10 @@
   > Wood, Derrick E., Jennifer Lu, and Ben Langmead. 2019. Improved Metagenomic Analysis with Kraken 2. Genome Biology 20 (1): 257. doi: 10.1186/s13059-019-1891-0.
 
+- [Bracken](https://doi.org/10.7717/peerj-cs.104)
+
+  > Lu, J., Breitwieser, F. P., Thielen, P., & Salzberg, S. L. (2017). Bracken: Estimating species abundance in metagenomics data. PeerJ Computer Science, 3, e104. doi: 10.7717/peerj-cs.104
+
 - [Krona](https://doi.org/10.1186/1471-2105-12-385)
 
   > Ondov, Brian D., Nicholas H. Bergman, and Adam M. Phillippy. 2011. Interactive metagenomic visualization in a Web browser. BMC Bioinformatics 12 (1): 385. doi: 10.1186/1471-2105-12-385.
@@ -62,6 +66,10 @@
 - [FILTLONG](https://github.com/rrwick/Filtlong)
 
+- [falco](https://doi.org/10.12688/f1000research.21142.2)
+
+  > de Sena Brandine G and Smith AD. Falco: high-speed FastQC emulation for quality control of sequencing data. F1000Research 2021, 8:1874
+
 ## Software packaging/containerisation tools
 
 - [Anaconda](https://anaconda.com)

@@ -30,7 +30,7 @@ On release, automated continuous integration tests run the pipeline on a full-si
 ![](docs/images/taxprofiler_tube.png)
 
-1. Read QC ([`FastQC`](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/))
+1. Read QC ([`FastQC`](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/) or [`falco`](https://github.com/smithlabcode/falco) as an alternative option)
 2. Performs optional read pre-processing
    - Adapter clipping and merging (short-read: [fastp](https://github.com/OpenGene/fastp), [AdapterRemoval2](https://github.com/MikkelSchubert/adapterremoval); long-read: [porechop](https://github.com/rrwick/Porechop))
    - Low complexity and quality filtering (short-read: [bbduk](https://jgi.doe.gov/data-and-tools/software-tools/bbtools/), [PRINSEQ++](https://github.com/Adrian-Cantu/PRINSEQ-plus-plus); long-read: [Filtlong](https://github.com/rrwick/Filtlong))

@@ -21,6 +21,7 @@ run_modules:
   - adapterRemoval
   - fastp
   - bowtie2
+  - samtools
   - kraken
   - malt
   - custom_content

@@ -40,6 +40,24 @@ process {
         ]
     }
 
+    withName: FALCO {
+        ext.prefix = { "${meta.id}_${meta.run_accession}_raw" }
+        publishDir = [
+            path: { "${params.outdir}/falco/raw" },
+            mode: params.publish_dir_mode,
+            pattern: '*.{html,txt}'
+        ]
+    }
+
+    withName: FALCO_PROCESSED {
+        ext.prefix = { "${meta.id}_${meta.run_accession}_processed" }
+        publishDir = [
+            path: { "${params.outdir}/falco/processed" },
+            mode: params.publish_dir_mode,
+            pattern: '*.{html,txt}'
+        ]
+    }
+
     withName: FASTP_SINGLE {
         ext.args = [
             // trimming options
@@ -215,6 +233,16 @@ process {
         ]
     }
 
+    withName: SAMTOOLS_STATS {
+        ext.prefix = { "${meta.id}_${meta.run_accession}" }
+        publishDir = [
+            path: { "${params.outdir}/samtools/stats" },
+            mode: params.publish_dir_mode,
+            enabled: params.save_hostremoval_unmapped,
+            pattern: '*stats'
+        ]
+    }
+
     withName: BBMAP_BBDUK {
         ext.args = [
             "entropy=${params.shortread_complexityfilter_entropy}",
@@ -277,7 +305,7 @@ process {
     }
 
     withName: KRAKEN2_KRAKEN2 {
-        ext.args = { "${meta.db_params}" }
+        ext.args = params.kraken2_save_minimizers ? { "${meta.db_params} --report-minimizer-data" } : { "${meta.db_params}" }
         ext.prefix = params.perform_runmerging ? { "${meta.id}-${meta.db_name}" } : { "${meta.id}-${meta.run_accession}-${meta.db_name}" }
         publishDir = [
             path: { "${params.outdir}/kraken2/${meta.db_name}/" },
@@ -286,6 +314,16 @@ process {
         ]
     }
 
+    withName: BRACKEN_BRACKEN {
+        errorStrategy = 'ignore'
+        ext.prefix = params.perform_runmerging ? { "${meta.id}-${meta.db_name}" } : { "${meta.id}-${meta.run_accession}-${meta.db_name}" }
+        publishDir = [
+            path: { "${params.outdir}/bracken/${meta.db_name}/" },
+            mode: params.publish_dir_mode,
+            pattern: '*.tsv'
+        ]
+    }
+
     withName: KRAKENTOOLS_COMBINEKREPORTS {
         ext.prefix = { "kraken2_${meta.id}_combined_reports" }
         publishDir = [

@@ -34,6 +34,7 @@ params {
     hostremoval_reference = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/homo_sapiens/genome/genome.fasta'
     run_kaiju             = true
     run_kraken2           = true
+    run_bracken           = true
     run_malt              = true
     run_metaphlan3        = true
     run_centrifuge        = true

@@ -33,6 +33,7 @@ params {
     hostremoval_reference = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/homo_sapiens/genome/genome.fasta'
     run_kaiju             = false
     run_kraken2           = false
+    run_bracken           = false
     run_malt              = false
     run_metaphlan3        = false
     run_centrifuge        = false

@@ -33,6 +33,7 @@ params {
     hostremoval_reference = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/homo_sapiens/genome/genome.fasta'
     run_kaiju             = true
     run_kraken2           = true
+    run_bracken           = true
     run_malt              = true
     run_metaphlan3        = true
     run_centrifuge        = true

@@ -34,6 +34,7 @@ params {
     hostremoval_reference = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/homo_sapiens/genome/genome.fasta'
     run_kaiju             = false
     run_kraken2           = false
+    run_bracken           = false
     run_malt              = false
     run_metaphlan3        = false
     run_centrifuge        = false

@@ -33,6 +33,7 @@ params {
     hostremoval_reference = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/homo_sapiens/genome/genome.fasta'
     run_kaiju             = false
     run_kraken2           = false
+    run_bracken           = false
     run_malt              = false
     run_metaphlan3        = false
     run_centrifuge        = false

@@ -19,6 +19,7 @@ params {
     hostremoval_reference = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/homo_sapiens/genome/genome.fasta'
     run_kaiju             = true
     run_kraken2           = true
+    run_bracken           = true
     run_malt              = true
     run_metaphlan3        = true
     run_centrifuge        = true

@@ -74,13 +74,13 @@ The pipeline takes the locations and specific profiling parameters of the tool o
 
 > ⚠️ nf-core/taxprofiler does not provide any databases by default, nor does it currently generate them for you. This must be performed manually by the user. See below for more information of the expected database files.
 
-An example database sheet can look as follows, where 4 tools are being used, and `malt` and `kraken2` will be used against two databases each.
+An example database sheet can look as follows, where 4 tools are being used, and `malt` and `kraken2` will be used against two databases each. This is because specifying `bracken` implies first running `kraken2` on the same database.
 
 ```console
 tool,db_name,db_params,db_path
 malt,malt85,-id 85,/<path>/<to>/malt/testdb-malt/
 malt,malt95,-id 90,/<path>/<to>/malt/testdb-malt.tar.gz
-kraken2,db1,,/<path>/<to>/kraken2/testdb-kraken2.tar.gz
+bracken,db1,,/<path>/<to>/bracken/testdb-bracken.tar.gz
 kraken2,db2,--quick,/<path>/<to>/kraken2/testdb-kraken2.tar.gz
 centrifuge,db1,,/<path>/<to>/centrifuge/minigut_cf.tar.gz
 metaphlan3,db1,,/<path>/<to>/metaphlan3/metaphlan_database/
@@ -91,8 +91,8 @@ Column specifications are as follows:
 
 | Column      | Description |
 | ----------- | ----------- |
-| `tool`      | Taxonomic profiling tool (supported by nf-core/taxprofiler) that the database has been indexed for [required]. |
-| `db_name`   | A unique name of the particular database [required]. |
+| `tool`      | Taxonomic profiling tool (supported by nf-core/taxprofiler) that the database has been indexed for [required]. Please note that `bracken` also implies running `kraken2` on the same database. |
+| `db_name`   | A unique name (per tool) for the particular database [required]. Please note that names need to be unique across both `kraken2` and `bracken` as well, even if re-using the same database. |
 | `db_params` | Any parameters of the given taxonomic profiler that you wish the tool to use when profiling against this specific database. Can be empty to use taxonomic profiler defaults. Must not be surrounded by quotes [required]. We generally do not recommend specifying parameters here that turn on/off saving of output files or specify particular file extensions - this should already be addressed via pipeline parameters. |
 | `db_path`   | Path to the database. Can either be a path to a directory containing the database index files or a `.tar.gz` file which contains the compressed database directory with the same name as the tar archive, minus `.tar.gz` [required]. |
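
As an illustrative sketch (database names and paths are placeholders, not taken from this commit), a sheet that runs both plain `kraken2` and Bracken against the same underlying Kraken2 database could look as follows, assuming the archive also contains the Bracken `kmer_distrib` files described below; note the two distinct `db_name` values even though `db_path` is shared:

```console
tool,db_name,db_params,db_path
kraken2,testdb-kraken2,,/<path>/<to>/kraken2/testdb-kraken2.tar.gz
bracken,testdb-bracken,,/<path>/<to>/kraken2/testdb-kraken2.tar.gz
```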
@@ -116,6 +116,15 @@ Expected (uncompressed) database files for each tool are as follows:
   - `opts.k2d`
   - `hash.k2d`
   - `taxo.k2d`
+- **Bracken** output of a combined `kraken2-` and `bracken-build` process. Please see the [documentation on Bracken](https://github.com/jenniferlu717/Bracken#running-bracken-easy-version) for details. The output is a directory containing files per expected sequencing read length, similar to:
+  - `hash.k2d`
+  - `opts.k2d`
+  - `taxo.k2d`
+  - `database.kraken`
+  - `database100mers.kmer_distrib`
+  - `database100mers.kraken`
+  - `database150mers.kmer_distrib`
+  - `database150mers.kraken`
 - **Centrifuge** output of `centrifuge-build`. A directory containing:
   - `<database_name>.<number>.cf`
   - `<database_name>.<number>.cf`
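
As a rough sketch (not part of this commit) of how such a combined Bracken database can be produced, assuming `kraken2-build` has already populated a `testdb-bracken/` directory and that read lengths of 100 bp and 150 bp are expected (adjust `-k`/`-l` to your data):

```console
# Add Bracken k-mer distribution files on top of an existing Kraken2 database
bracken-build -d testdb-bracken/ -t 8 -k 35 -l 100
bracken-build -d testdb-bracken/ -t 8 -k 35 -l 150
```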
@@ -166,6 +175,10 @@ work # Directory containing the nextflow working files
 # Other nextflow hidden files, eg. history of pipeline runs and old logs.
 ```
 
+### Sequencing quality control
+
+nf-core/taxprofiler offers [`falco`](https://github.com/smithlabcode/falco) as an alternative option to [`FastQC`](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/).
+
 ### Preprocessing Steps
 
 nf-core/taxprofiler offers four main preprocessing steps
@@ -179,7 +192,7 @@ nf-core/taxprofiler offers four main preprocessing steps
 
 Raw sequencing read processing in the form of adapter clipping and paired-end read merging can be activated via the `--perform_shortread_qc` or `--perform_longread_qc` flags.
 
-It is highly recommended to run this on raw reads to remove artefacts from sequencing that can cause false positive identification of taxa (e.g. contaminated reference genomes) and/or skews in taxonomic abundance profiles.
+It is highly recommended to run this on raw reads to remove artifacts from sequencing that can cause false positive identification of taxa (e.g. contaminated reference genomes) and/or skews in taxonomic abundance profiles.
 
 There are currently two options for short-read preprocessing: `fastp` or `adapterremoval`.
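
A hypothetical invocation combining the new falco option with short-read preprocessing might look like this (input file names are placeholders):

```console
nextflow run nf-core/taxprofiler -profile docker \
    --input samplesheet.csv \
    --databases database.csv \
    --outdir ./results \
    --preprocessing_qc_tool falco \
    --perform_shortread_qc
```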

@@ -21,6 +21,10 @@
             "branch": "master",
             "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905"
         },
+        "bracken/bracken": {
+            "branch": "master",
+            "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905"
+        },
         "cat/fastq": {
             "branch": "master",
             "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905"
@@ -49,6 +53,10 @@
             "branch": "master",
             "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905"
         },
+        "falco": {
+            "branch": "master",
+            "git_sha": "fc959214036403ad83efe7a41d43d0606c445cda"
+        },
         "fastp": {
             "branch": "master",
             "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905"
@@ -149,6 +157,14 @@
             "branch": "master",
             "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905"
         },
+        "samtools/index": {
+            "branch": "master",
+            "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905"
+        },
+        "samtools/stats": {
+            "branch": "master",
+            "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905"
+        },
         "samtools/view": {
             "branch": "master",
             "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905"

modules/local/kraken2_standard_report.nf (new file, 32 lines)

process KRAKEN2_STANDARD_REPORT {
    tag "$meta.id"
    label 'process_single'

    conda (params.enable_conda ? 'conda-forge::sed=4.8' : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://containers.biocontainers.pro/s3/SingImgsRepo/biocontainers/v1.2.0_cv2/biocontainers_v1.2.0_cv2.img' :
        'biocontainers/biocontainers:v1.2.0_cv2' }"

    input:
    tuple val(meta), path(report)

    output:
    tuple val(meta), path(result), emit: report
    path 'versions.yml'          , emit: versions

    when:
    task.ext.when == null || task.ext.when

    script:
    def prefix = task.ext.prefix ?: "${meta.id}"
    result = "${prefix}_standardized.kraken2.report.txt"
    """
    cut -f1-3,6-8 '${report}' > '${result}'

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        cut: \$(echo \$(cut --version 2>&1) | sed 's/^.*(GNU coreutils) //; s/ Copyright.*\$//')
    END_VERSIONS
    """
}
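
For context: with `--report-minimizer-data`, Kraken2 inserts minimizer counts as columns 4 and 5, so `cut -f1-3,6-8` restores the standard six-column report that downstream tools such as Bracken expect. A sketch of the transformation on a single report line (values and file names are illustrative):

```console
$ head -n1 minimizer.kraken2.report.txt
 99.97	77577	77577	1794	1022	U	0	unclassified
$ cut -f1-3,6-8 minimizer.kraken2.report.txt | head -n1
 99.97	77577	77577	U	0	unclassified
```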

modules/nf-core/bracken/bracken/main.nf (new file, 45 lines)

process BRACKEN_BRACKEN {
    tag "$meta.id"
    label 'process_low'

    // WARN: Version information not provided by tool on CLI.
    // Please update version string below when bumping container versions.
    conda (params.enable_conda ? "bioconda::bracken=2.7" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/bracken:2.7--py39hc16433a_0':
        'quay.io/biocontainers/bracken:2.7--py39hc16433a_0' }"

    input:
    tuple val(meta), path(kraken_report)
    path database

    output:
    tuple val(meta), path(bracken_report), emit: reports
    path "versions.yml"                  , emit: versions

    when:
    task.ext.when == null || task.ext.when

    script:
    def threshold = meta.threshold ?: 10
    def taxonomic_level = meta.taxonomic_level ?: 'S'
    def read_length = meta.read_length ?: 150
    def args = task.ext.args ?: "-l ${taxonomic_level} -t ${threshold} -r ${read_length}"
    def prefix = task.ext.prefix ?: "${meta.id}"
    bracken_report = "${prefix}_${taxonomic_level}.tsv"
    // WARN: Version information not provided by tool on CLI.
    // Please update version string below when bumping container versions.
    def VERSION = '2.7'
    """
    bracken \\
        ${args} \\
        -d '${database}' \\
        -i '${kraken_report}' \\
        -o '${bracken_report}'

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        bracken: ${VERSION}
    END_VERSIONS
    """
}
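
With the module defaults above (`-l S -t 10 -r 150`), the composed command is equivalent to calling Bracken directly, e.g. (database and file names illustrative):

```console
bracken -l S -t 10 -r 150 \
    -d 'testdb-bracken' \
    -i 'sample.kraken2.report.txt' \
    -o 'sample_S.tsv'
```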

modules/nf-core/bracken/bracken/meta.yml (new file, 45 lines)

name: bracken_bracken
description: Re-estimate taxonomic abundance of metagenomic samples analyzed by kraken.
keywords:
  - sort
tools:
  - bracken:
      description: Bracken (Bayesian Reestimation of Abundance with KrakEN) is a highly accurate statistical method that computes the abundance of species in DNA sequences from a metagenomics sample.
      homepage: https://ccb.jhu.edu/software/bracken/
      documentation: https://ccb.jhu.edu/software/bracken/index.shtml?t=manual
      tool_dev_url: https://github.com/jenniferlu717/Bracken
      doi: "10.7717/peerj-cs.104"
      licence: ["GPL v3"]
input:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - kraken_report:
      type: file
      description: TSV file with six columns coming from kraken2 output
      pattern: "*.{tsv}"
  - database:
      type: file
      description: Directory containing the kraken2/Bracken files for analysis
      pattern: "*"
output:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"
  - reports:
      type: file
      description: TSV output report of the re-estimated abundances
      pattern: "*.{tsv}"
authors:
  - "@Midnighter"

modules/nf-core/falco/main.nf (new file, 57 lines)

process FALCO {
    tag "$meta.id"
    label 'process_single'

    conda (params.enable_conda ? "bioconda::falco=1.2.1" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/falco:1.2.1--h867801b_3':
        'quay.io/biocontainers/falco:1.2.1--h867801b_3' }"

    input:
    tuple val(meta), path(reads)

    output:
    tuple val(meta), path("*.html"), emit: html
    tuple val(meta), path("*.txt") , emit: txt
    path "versions.yml"            , emit: versions

    when:
    task.ext.when == null || task.ext.when

    script:
    def args = task.ext.args ?: ''
    def prefix = task.ext.prefix ?: "${meta.id}"
    if ( reads.toList().size() == 1 ) {
        """
        falco $args --threads $task.cpus ${reads} -D ${prefix}_data.txt -S ${prefix}_summary.txt -R ${prefix}_report.html

        cat <<-END_VERSIONS > versions.yml
        "${task.process}":
            falco:\$( falco --version | sed -e "s/falco//g" )
        END_VERSIONS
        """
    } else {
        """
        falco $args --threads $task.cpus ${reads}

        cat <<-END_VERSIONS > versions.yml
        "${task.process}":
            falco:\$( falco --version | sed -e "s/falco//g" )
        END_VERSIONS
        """
    }

    stub:
    def prefix = task.ext.prefix ?: "${meta.id}"
    """
    touch ${prefix}_data.txt
    touch ${prefix}_fastqc_data.html
    touch ${prefix}_summary.txt

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        falco: \$( falco --version | sed -e "s/falco v//g" )
    END_VERSIONS
    """
}

modules/nf-core/falco/meta.yml (new file, 52 lines)

name: falco
description: Run falco on sequenced reads
keywords:
  - quality control
  - qc
  - adapters
  - fastq
tools:
  - falco:
      description: "falco is a drop-in C++ implementation of FastQC to assess the quality of sequence reads."
      homepage: "https://falco.readthedocs.io/"
      documentation: "https://falco.readthedocs.io/"
      tool_dev_url: "None"
      doi: ""
      licence: "['GPL v3']"
input:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - reads:
      type: file
      description: |
        List of input FastQ files of size 1 and 2 for single-end and paired-end data,
        respectively.
output:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - html:
      type: file
      description: FastQC-like report
      pattern: "*_{fastqc_report.html}"
  - txt:
      type: file
      description: falco report data
      pattern: "*_{data.txt}"
  - txt:
      type: file
      description: falco summary file
      pattern: "*_{summary.txt}"
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"
authors:
  - "@lucacozzuto"

modules/nf-core/samtools/index/main.nf (new file, 48 lines)

process SAMTOOLS_INDEX {
    tag "$meta.id"
    label 'process_low'

    conda (params.enable_conda ? "bioconda::samtools=1.15.1" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/samtools:1.15.1--h1170115_0' :
        'quay.io/biocontainers/samtools:1.15.1--h1170115_0' }"

    input:
    tuple val(meta), path(input)

    output:
    tuple val(meta), path("*.bai") , optional:true, emit: bai
    tuple val(meta), path("*.csi") , optional:true, emit: csi
    tuple val(meta), path("*.crai"), optional:true, emit: crai
    path "versions.yml"            , emit: versions

    when:
    task.ext.when == null || task.ext.when

    script:
    def args = task.ext.args ?: ''
    """
    samtools \\
        index \\
        -@ ${task.cpus-1} \\
        $args \\
        $input

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
    END_VERSIONS
    """

    stub:
    """
    touch ${input}.bai
    touch ${input}.crai
    touch ${input}.csi

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
    END_VERSIONS
    """
}

modules/nf-core/samtools/index/meta.yml (new file, 53 lines)

name: samtools_index
description: Index SAM/BAM/CRAM file
keywords:
  - index
  - bam
  - sam
  - cram
tools:
  - samtools:
      description: |
        SAMtools is a set of utilities for interacting with and post-processing
        short DNA sequence read alignments in the SAM, BAM and CRAM formats, written by Heng Li.
        These files are generated as output by short read aligners like BWA.
      homepage: http://www.htslib.org/
      documentation: http://www.htslib.org/doc/samtools.html
      doi: 10.1093/bioinformatics/btp352
      licence: ["MIT"]
input:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - bam:
      type: file
      description: BAM/CRAM/SAM file
      pattern: "*.{bam,cram,sam}"
output:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - bai:
      type: file
      description: BAM/CRAM/SAM index file
      pattern: "*.{bai,crai,sai}"
  - crai:
      type: file
      description: BAM/CRAM/SAM index file
      pattern: "*.{bai,crai,sai}"
  - csi:
      type: file
      description: CSI index file
      pattern: "*.{csi}"
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"
authors:
  - "@drpatelh"
  - "@ewels"
  - "@maxulysse"

modules/nf-core/samtools/stats/main.nf (new file, 49 lines)

process SAMTOOLS_STATS {
    tag "$meta.id"
    label 'process_single'

    conda (params.enable_conda ? "bioconda::samtools=1.15.1" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/samtools:1.15.1--h1170115_0' :
        'quay.io/biocontainers/samtools:1.15.1--h1170115_0' }"

    input:
    tuple val(meta), path(input), path(input_index)
    path fasta

    output:
    tuple val(meta), path("*.stats"), emit: stats
    path "versions.yml"             , emit: versions

    when:
    task.ext.when == null || task.ext.when

    script:
    def args = task.ext.args ?: ''
    def prefix = task.ext.prefix ?: "${meta.id}"
    def reference = fasta ? "--reference ${fasta}" : ""
    """
    samtools \\
        stats \\
        --threads ${task.cpus} \\
        ${reference} \\
        ${input} \\
        > ${prefix}.stats

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
    END_VERSIONS
    """

    stub:
    def prefix = task.ext.prefix ?: "${meta.id}"
    """
    touch ${prefix}.stats

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
    END_VERSIONS
    """
}

modules/nf-core/samtools/stats/meta.yml (new file, 53 lines)

name: samtools_stats
description: Produces comprehensive statistics from SAM/BAM/CRAM file
keywords:
  - statistics
  - counts
  - bam
  - sam
  - cram
tools:
  - samtools:
      description: |
        SAMtools is a set of utilities for interacting with and post-processing
        short DNA sequence read alignments in the SAM, BAM and CRAM formats, written by Heng Li.
        These files are generated as output by short read aligners like BWA.
      homepage: http://www.htslib.org/
      documentation: http://www.htslib.org/doc/samtools.html
      doi: 10.1093/bioinformatics/btp352
      licence: ["MIT"]
input:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - input:
      type: file
      description: BAM/CRAM file from alignment
      pattern: "*.{bam,cram}"
  - input_index:
      type: file
      description: BAI/CRAI file from alignment
      pattern: "*.{bai,crai}"
  - fasta:
      type: optional file
      description: Reference file the CRAM was created with
      pattern: "*.{fasta,fa}"
output:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - stats:
      type: file
      description: File containing samtools stats output
      pattern: "*.{stats}"
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"
authors:
  - "@drpatelh"
  - "@FriederikeHanssen"

@@ -59,6 +59,8 @@ params {
     // Databases
     databases = null
 
+    preprocessing_qc_tool = 'fastqc'
+
     // FASTQ preprocessing
     perform_shortread_qc = false
     shortread_qc_tool = 'fastp'
@@ -114,6 +116,10 @@ params {
     run_kraken2 = false
     kraken2_save_reads = false // added directly to module in profiling.nf
     kraken2_save_readclassification = false // added directly to module in profiling.nf
+    kraken2_save_minimizers = false
+
+    // Bracken
+    run_bracken = false
 
     // centrifuge
     run_centrifuge = false

@@ -10,7 +10,7 @@
             "type": "object",
             "fa_icon": "fas fa-terminal",
             "description": "Define where the pipeline should find input data and save output data.",
-            "required": ["input", "outdir", "databases"],
+            "required": ["input", "databases", "outdir"],
             "properties": {
                 "input": {
                     "type": "string",
@@ -382,6 +382,17 @@
             "description": "Turn on saving of Kraken2 per-read taxonomic assignment file",
             "help_text": "Save a text file that contains a list of each read that had a taxonomic assignment, with information on the specific taxonomic assignment that the read received.\n\n> Modifies tool parameter(s):\n> - kraken2: `--output`"
         },
+        "kraken2_save_minimizers": {
+            "type": "boolean",
+            "description": "Turn on saving minimizer information in the kraken2 report, changing it to an eight-column layout.",
+            "fa_icon": "fas fa-save",
+            "help_text": "Turn on saving minimizer information in the kraken2 report, changing it to an eight-column layout.\n\nAdds `--report-minimizer-data` to the kraken2 command."
+        },
+        "run_bracken": {
+            "type": "boolean",
+            "description": "Post-process kraken2 reports with Bracken.",
+            "fa_icon": "fas fa-toggle-on"
+        },
         "run_malt": {
             "type": "boolean",
             "fa_icon": "fas fa-toggle-on",
@@ -707,5 +718,14 @@
         {
             "$ref": "#/definitions/reference_genome_options"
         }
-    ]
+    ],
+    "properties": {
+        "preprocessing_qc_tool": {
+            "type": "string",
+            "default": "fastqc",
+            "enum": ["fastqc", "falco"],
+            "help_text": "Falco is designed as a drop-in replacement for FastQC but written in C++ for faster computation. We particularly recommend using falco when processing long reads (due to reduced memory constraints); however, it is also applicable to short reads.",
+            "description": "Specify the tool used for quality control of raw sequencing reads"
+        }
+    }
 }

@@ -6,6 +6,8 @@ include { MINIMAP2_INDEX } from '../../modules/nf-core/minimap2/index/main'
 include { MINIMAP2_ALIGN } from '../../modules/nf-core/minimap2/align/main'
 include { SAMTOOLS_VIEW } from '../../modules/nf-core/samtools/view/main'
 include { SAMTOOLS_BAM2FQ } from '../../modules/nf-core/samtools/bam2fq/main'
+include { SAMTOOLS_INDEX } from '../../modules/nf-core/samtools/index/main'
+include { SAMTOOLS_STATS } from '../../modules/nf-core/samtools/stats/main'
 
 workflow LONGREAD_HOSTREMOVAL {
     take:
@@ -39,9 +41,21 @@ workflow LONGREAD_HOSTREMOVAL {
     SAMTOOLS_BAM2FQ ( SAMTOOLS_VIEW.out.bam, false )
     ch_versions = ch_versions.mix( SAMTOOLS_BAM2FQ.out.versions.first() )
 
+    SAMTOOLS_INDEX ( SAMTOOLS_VIEW.out.bam )
+    ch_versions = ch_versions.mix( SAMTOOLS_INDEX.out.versions.first() )
+
+    bam_bai = SAMTOOLS_VIEW.out.bam
+        .join(SAMTOOLS_INDEX.out.bai, remainder: true)
+
+    SAMTOOLS_STATS ( bam_bai, reference )
+    ch_versions = ch_versions.mix(SAMTOOLS_STATS.out.versions.first())
+    ch_multiqc_files = ch_multiqc_files.mix( SAMTOOLS_STATS.out.stats )
+
     emit:
+    stats    = SAMTOOLS_STATS.out.stats  // channel: [ val(meta), [ stats ] ]
     reads    = SAMTOOLS_BAM2FQ.out.reads // channel: [ val(meta), [ reads ] ]
     versions = ch_versions               // channel: [ versions.yml ]
+    mqc      = ch_multiqc_files
 }

@@ -3,6 +3,8 @@
 //
 
 include { FASTQC as FASTQC_PROCESSED } from '../../modules/nf-core/fastqc/main'
+include { FALCO as FALCO_PROCESSED   } from '../../modules/nf-core/falco/main'
+
 include { PORECHOP } from '../../modules/nf-core/porechop/main'
 include { FILTLONG } from '../../modules/nf-core/filtlong/main'
@@ -52,9 +54,17 @@ workflow LONGREAD_PREPROCESSING {
         ch_multiqc_files = ch_multiqc_files.mix( FILTLONG.out.log )
     }
 
+    if (params.preprocessing_qc_tool == 'fastqc') {
         FASTQC_PROCESSED ( ch_processed_reads )
+        ch_versions = ch_versions.mix( FASTQC_PROCESSED.out.versions )
         ch_multiqc_files = ch_multiqc_files.mix( FASTQC_PROCESSED.out.zip )
+    } else if (params.preprocessing_qc_tool == 'falco') {
+        FALCO_PROCESSED ( ch_processed_reads )
+        ch_versions = ch_versions.mix( FALCO_PROCESSED.out.versions )
+        ch_multiqc_files = ch_multiqc_files.mix( FALCO_PROCESSED.out.txt )
+    }
 
     emit:
     reads    = ch_processed_reads // channel: [ val(meta), [ reads ] ]
     versions = ch_versions        // channel: [ versions.yml ]

@@ -5,6 +5,8 @@
 include { MALT_RUN } from '../../modules/nf-core/malt/run/main'
 include { MEGAN_RMA2INFO as MEGAN_RMA2INFO_TSV } from '../../modules/nf-core/megan/rma2info/main'
 include { KRAKEN2_KRAKEN2 } from '../../modules/nf-core/kraken2/kraken2/main'
+include { KRAKEN2_STANDARD_REPORT } from '../../modules/local/kraken2_standard_report'
+include { BRACKEN_BRACKEN } from '../../modules/nf-core/bracken/bracken/main'
 include { CENTRIFUGE_CENTRIFUGE } from '../../modules/nf-core/centrifuge/centrifuge/main'
 include { CENTRIFUGE_KREPORT } from '../../modules/nf-core/centrifuge/kreport/main'
 include { METAPHLAN3_METAPHLAN3 } from '../../modules/nf-core/metaphlan3/metaphlan3/main'
@@ -40,7 +42,7 @@ workflow PROFILING {
         .combine(databases)
         .branch {
             malt: it[2]['tool'] == 'malt'
-            kraken2: it[2]['tool'] == 'kraken2'
+            kraken2: it[2]['tool'] == 'kraken2' || it[2]['tool'] == 'bracken' // to reuse the kraken module to produce the input data for bracken
             metaphlan3: it[2]['tool'] == 'metaphlan3'
             centrifuge: it[2]['tool'] == 'centrifuge'
             kaiju: it[2]['tool'] == 'kaiju'
@@ -131,7 +133,42 @@ workflow PROFILING {
         ch_multiqc_files = ch_multiqc_files.mix( KRAKEN2_KRAKEN2.out.report )
         ch_versions = ch_versions.mix( KRAKEN2_KRAKEN2.out.versions.first() )
         ch_raw_classifications = ch_raw_classifications.mix( KRAKEN2_KRAKEN2.out.classified_reads_assignment )
-        ch_raw_profiles = ch_raw_profiles.mix( KRAKEN2_KRAKEN2.out.report )
+        ch_raw_profiles = ch_raw_profiles.mix(
+            KRAKEN2_KRAKEN2.out.report
+                // Set the tool to be strictly 'kraken2' instead of potentially 'bracken' for downstream use.
+                // Will remain distinct from 'pure' Kraken2 results due to distinct database names in file names.
+                .map { meta, report -> [meta + [tool: 'kraken2'], report] }
+        )
+    }
+
+    if ( params.run_kraken2 && params.run_bracken ) {
+        // Remove files from 'pure' kraken2 runs, so only those aligned against a Bracken & kraken2 database are used.
+        def ch_kraken2_output = KRAKEN2_KRAKEN2.out.report
+            .filter { meta, report -> meta['tool'] == 'bracken' }
+
+        // If necessary, convert the eight-column output to six-column output.
+        if (params.kraken2_save_minimizers) {
+            ch_kraken2_output = KRAKEN2_STANDARD_REPORT(ch_kraken2_output).report
+        }
+
+        // Extract the database name to combine by.
+        ch_bracken_databases = databases
+            .filter { meta, db -> meta['tool'] == 'bracken' }
+            .map { meta, db -> [meta['db_name'], meta, db] }
+
+        // Combine each Kraken2 report with the matching Bracken database by name.
+        ch_input_for_bracken = ch_kraken2_output
+            .map { meta, report -> [meta['db_name'], meta, report] }
+            .combine(ch_bracken_databases, by: 0)
+            .multiMap { key, meta, report, db_meta, db ->
+                report: [meta + db_meta, report]
+                db: db
+            }
+
+        BRACKEN_BRACKEN(ch_input_for_bracken.report, ch_input_for_bracken.db)
+        ch_versions = ch_versions.mix(BRACKEN_BRACKEN.out.versions.first())
+        ch_raw_profiles = ch_raw_profiles.mix(BRACKEN_BRACKEN.out.reports)
     }

@@ -6,6 +6,7 @@
 include { SHORTREAD_FASTP } from './shortread_fastp'
 include { SHORTREAD_ADAPTERREMOVAL } from './shortread_adapterremoval'
 include { FASTQC as FASTQC_PROCESSED } from '../../modules/nf-core/fastqc/main'
+include { FALCO as FALCO_PROCESSED } from '../../modules/nf-core/falco/main'
 
 workflow SHORTREAD_PREPROCESSING {
     take:
@@ -27,9 +28,15 @@ workflow SHORTREAD_PREPROCESSING {
         ch_processed_reads = reads
     }
 
+    if (params.preprocessing_qc_tool == 'fastqc') {
         FASTQC_PROCESSED ( ch_processed_reads )
         ch_versions = ch_versions.mix( FASTQC_PROCESSED.out.versions )
         ch_multiqc_files = ch_multiqc_files.mix( FASTQC_PROCESSED.out.zip )
+    } else if (params.preprocessing_qc_tool == 'falco') {
+        FALCO_PROCESSED ( ch_processed_reads )
+        ch_versions = ch_versions.mix( FALCO_PROCESSED.out.versions )
+        ch_multiqc_files = ch_multiqc_files.mix( FALCO_PROCESSED.out.txt )
+    }
 
     emit:
     reads    = ch_processed_reads // channel: [ val(meta), [ reads ] ]

@@ -41,6 +41,7 @@ if (params.longread_hostremoval_index ) { ch_longread_reference_index = fi
 if (params.diamond_save_reads ) log.warn "[nf-core/taxprofiler] DIAMOND only allows output of a single format. As --diamond_save_reads supplied, only aligned reads in SAM format will be produced, no taxonomic profiles will be available."
 if (params.run_malt && params.run_krona && !params.krona_taxonomy_directory) log.warn "[nf-core/taxprofiler] Krona can only be run on MALT output if path to Krona taxonomy database supplied to --krona_taxonomy_directory. Krona will not be executed in this run for MALT."
+if (params.run_bracken && !params.run_kraken2) exit 1, 'ERROR: [nf-core/taxprofiler] You are attempting to run Bracken without running kraken2. This is not possible! Please set --run_kraken2 as well.'
 
 /*
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -84,6 +85,7 @@ include { STANDARDISATION_PROFILES } from '../subworkflows/local/standardis
 // MODULE: Installed directly from nf-core/modules
 //
 include { FASTQC } from '../modules/nf-core/fastqc/main'
+include { FALCO } from '../modules/nf-core/falco/main'
 include { MULTIQC } from '../modules/nf-core/multiqc/main'
 include { CUSTOM_DUMPSOFTWAREVERSIONS } from '../modules/nf-core/custom/dumpsoftwareversions/main'
 include { CAT_FASTQ } from '../modules/nf-core/cat/fastq/main'
@@ -120,12 +122,13 @@ workflow TAXPROFILER {
     */
     ch_input_for_fastqc = INPUT_CHECK.out.fastq.mix( INPUT_CHECK.out.nanopore )
 
-    FASTQC (
-        ch_input_for_fastqc
-    )
-
+    if ( params.preprocessing_qc_tool == 'falco' ) {
+        FALCO ( ch_input_for_fastqc )
+        ch_versions = ch_versions.mix(FALCO.out.versions.first())
+    } else {
+        FASTQC ( ch_input_for_fastqc )
         ch_versions = ch_versions.mix(FASTQC.out.versions.first())
+    }
 
     /*
         SUBWORKFLOW: PERFORM PREPROCESSING
     */
@@ -254,7 +257,13 @@ workflow TAXPROFILER {
     ch_multiqc_files = ch_multiqc_files.mix(ch_workflow_summary.collectFile(name: 'workflow_summary_mqc.yaml'))
     ch_multiqc_files = ch_multiqc_files.mix(ch_methods_description.collectFile(name: 'methods_description_mqc.yaml'))
     ch_multiqc_files = ch_multiqc_files.mix(CUSTOM_DUMPSOFTWAREVERSIONS.out.mqc_yml.collect())
+
+    if ( params.preprocessing_qc_tool == 'falco' ) {
+        ch_multiqc_files = ch_multiqc_files.mix(FALCO.out.txt.collect{it[1]}.ifEmpty([]))
+    } else {
         ch_multiqc_files = ch_multiqc_files.mix(FASTQC.out.zip.collect{it[1]}.ifEmpty([]))
+    }
 
     if (params.perform_shortread_qc) {
         ch_multiqc_files = ch_multiqc_files.mix( SHORTREAD_PREPROCESSING.out.mqc.collect{it[1]}.ifEmpty([]) )
@@ -272,6 +281,10 @@ workflow TAXPROFILER {
         ch_multiqc_files = ch_multiqc_files.mix(SHORTREAD_HOSTREMOVAL.out.mqc.collect{it[1]}.ifEmpty([]))
     }
 
+    if (params.perform_longread_hostremoval) {
+        ch_multiqc_files = ch_multiqc_files.mix(LONGREAD_HOSTREMOVAL.out.mqc.collect{it[1]}.ifEmpty([]))
+    }
+
     ch_multiqc_files = ch_multiqc_files.mix( PROFILING.out.mqc.collect{it[1]}.ifEmpty([]) )
 
     if ( params.run_profile_standardisation ) {