//
// Run profiling
//

include { MALT_RUN                              } from '../../modules/nf-core/malt/run/main'
include { MEGAN_RMA2INFO as MEGAN_RMA2INFO_TSV  } from '../../modules/nf-core/megan/rma2info/main'
include { KRAKEN2_KRAKEN2                       } from '../../modules/nf-core/kraken2/kraken2/main'
include { KRAKEN2_STANDARD_REPORT               } from '../../modules/local/kraken2_standard_report'
include { BRACKEN_BRACKEN                       } from '../../modules/nf-core/bracken/bracken/main'
include { CENTRIFUGE_CENTRIFUGE                 } from '../../modules/nf-core/centrifuge/centrifuge/main'
include { CENTRIFUGE_KREPORT                    } from '../../modules/nf-core/centrifuge/kreport/main'
include { METAPHLAN3_METAPHLAN3                 } from '../../modules/nf-core/metaphlan3/metaphlan3/main'
include { KAIJU_KAIJU                           } from '../../modules/nf-core/kaiju/kaiju/main'
include { DIAMOND_BLASTX                        } from '../../modules/nf-core/diamond/blastx/main'
include { MOTUS_PROFILE                         } from '../../modules/nf-core/motus/profile/main'
include { KRAKENUNIQ_PRELOADEDKRAKENUNIQ        } from '../../modules/nf-core/krakenuniq/preloadedkrakenuniq/main'
//
// PROFILING: run each enabled taxonomic profiler/classifier on every
// read-set x database combination, collecting raw classifications,
// raw profiles, versions, and MultiQC-compatible logs.
//
workflow PROFILING {
    take:
    reads     // channel: [ val(meta), [ reads ] ]
    databases // channel: [ val(meta), path(db) ] - meta is expected to carry 'tool', 'db_name', 'db_params'

    main:
    ch_versions            = Channel.empty()
    ch_multiqc_files       = Channel.empty()
    ch_raw_classifications = Channel.empty()
    ch_raw_profiles        = Channel.empty()

    /*
        COMBINE READS WITH POSSIBLE DATABASES
    */

    // e.g. output [DUMP: reads_plus_db] [['id':'2612', 'run_accession':'combined', 'instrument_platform':'ILLUMINA', 'single_end':1], <reads_path>/2612.merged.fastq.gz, ['tool':'malt', 'db_name':'mal95', 'db_params':'"-id 90"'], <db_path>/malt90]
    ch_input_for_profiling = reads
        .map {
            meta, reads ->
                // Suffix the sample id with the read layout so single- and
                // paired-end runs of the same sample get distinct output names.
                // NOTE: 'def' is required here - without it 'pairtype' becomes an
                // implicit global shared across concurrent closure invocations.
                def meta_new = meta.clone()
                def pairtype = meta_new['single_end'] ? '_se' : '_pe'
                meta_new['id'] = meta_new['id'] + pairtype
                [meta_new, reads]
        }
        .combine(databases)
        .branch {
            centrifuge: it[2]['tool'] == 'centrifuge'
            diamond:    it[2]['tool'] == 'diamond'
            kaiju:      it[2]['tool'] == 'kaiju'
            kraken2:    it[2]['tool'] == 'kraken2' || it[2]['tool'] == 'bracken' // to reuse the kraken module to produce the input data for bracken
            krakenuniq: it[2]['tool'] == 'krakenuniq'
            malt:       it[2]['tool'] == 'malt'
            metaphlan3: it[2]['tool'] == 'metaphlan3'
            motus:      it[2]['tool'] == 'motus'
            unknown:    true
        }

    /*
        PREPARE PROFILER INPUT CHANNELS & RUN PROFILING
    */

    // Each tool has a slightly different input structure and generally separate
    // input channels for reads vs databases. We restructure the channel tuple
    // for each tool and make liberal use of multiMap to keep reads/databases
    // channel element order in sync with each other

    if ( params.run_malt ) {

        // MALT: We groupTuple to have all samples in one channel for MALT as database
        // loading takes a long time, so we only want to run it once per database
        ch_input_for_malt = ch_input_for_profiling.malt
            .map {
                meta, reads, db_meta, db ->

                    // Reset entire input meta for MALT to just database name,
                    // as we don't run on a per-sample basis due to huge databases,
                    // so all samples are in one run and so sample-specific metadata
                    // unnecessary. Set as database name to prevent `null` job ID and prefix.
                    def temp_meta = [ id: meta['db_name'] ]

                    // Extend database parameters to specify whether to save alignments or not
                    def new_db_meta = db_meta.clone()
                    def sam_format = params.malt_save_reads ? ' --alignments ./ -za false' : ""
                    new_db_meta['db_params'] = db_meta['db_params'] + sam_format

                    // Combine reduced sample metadata with updated database parameters metadata,
                    // make sure id is db_name for publishing purposes.
                    def new_meta = temp_meta + new_db_meta
                    new_meta['id'] = new_meta['db_name']

                    [ new_meta, reads, db ]
            }
            .groupTuple(by: [0,2])
            .multiMap {
                it ->
                    reads: [ it[0], it[1].flatten() ]
                    db: it[2]
            }

        MALT_RUN ( ch_input_for_malt.reads, ch_input_for_malt.db )

        ch_maltrun_for_megan = MALT_RUN.out.rma6
            .transpose()
            .map {
                meta, rma ->
                    // re-extract meta from file names, use filename without rma to
                    // ensure we keep paired-end information in downstream filenames
                    // when no pair-merging
                    def meta_new = meta.clone()
                    meta_new['db_name'] = meta.id
                    meta_new['id'] = rma.baseName
                    [ meta_new, rma ]
            }

        MEGAN_RMA2INFO_TSV ( ch_maltrun_for_megan, params.malt_generate_megansummary )
        ch_multiqc_files       = ch_multiqc_files.mix( MALT_RUN.out.log )
        ch_versions            = ch_versions.mix( MALT_RUN.out.versions.first(), MEGAN_RMA2INFO_TSV.out.versions.first() )
        ch_raw_classifications = ch_raw_classifications.mix( ch_maltrun_for_megan )
        ch_raw_profiles        = ch_raw_profiles.mix( MEGAN_RMA2INFO_TSV.out.txt )

    }

    if ( params.run_kraken2 ) {

        ch_input_for_kraken2 = ch_input_for_profiling.kraken2
            .multiMap {
                it ->
                    reads: [ it[0] + it[2], it[1] ]
                    db: it[3]
            }

        KRAKEN2_KRAKEN2 ( ch_input_for_kraken2.reads, ch_input_for_kraken2.db, params.kraken2_save_reads, params.kraken2_save_readclassification )
        ch_multiqc_files       = ch_multiqc_files.mix( KRAKEN2_KRAKEN2.out.report )
        ch_versions            = ch_versions.mix( KRAKEN2_KRAKEN2.out.versions.first() )
        ch_raw_classifications = ch_raw_classifications.mix( KRAKEN2_KRAKEN2.out.classified_reads_assignment )
        ch_raw_profiles        = ch_raw_profiles.mix(
            KRAKEN2_KRAKEN2.out.report
                // Set the tool to be strictly 'kraken2' instead of potentially 'bracken' for downstream use.
                // Will remain distinct from 'pure' Kraken2 results due to distinct database names in file names.
                .map { meta, report -> [meta + [tool: 'kraken2'], report] }
        )

    }

    if ( params.run_kraken2 && params.run_bracken ) {

        // Remove files from 'pure' kraken2 runs, so only those aligned against Bracken & kraken2 database are used.
        def ch_kraken2_output = KRAKEN2_KRAKEN2.out.report
            .filter {
                meta, report ->
                    if ( meta['instrument_platform'] == 'OXFORD_NANOPORE' ) log.warn "[nf-core/taxprofiler] Bracken has not been evaluated for Nanopore data. Skipping Bracken for sample ${meta.id}."
                    meta['tool'] == 'bracken' && meta['instrument_platform'] != 'OXFORD_NANOPORE'
            }

        // If necessary, convert the eight column output to six column output.
        if ( params.kraken2_save_minimizers ) {
            ch_kraken2_output = KRAKEN2_STANDARD_REPORT(ch_kraken2_output).report
        }

        // Extract the database name to combine by.
        ch_bracken_databases = databases
            .filter { meta, db -> meta['tool'] == 'bracken' }
            .map { meta, db -> [meta['db_name'], meta, db] }

        // Pair each Kraken2 report with its matching Bracken database by database name.
        ch_input_for_bracken = ch_kraken2_output
            .map { meta, report -> [meta['db_name'], meta, report] }
            .combine(ch_bracken_databases, by: 0)
            .multiMap { key, meta, report, db_meta, db ->
                report: [meta + db_meta, report]
                db: db
            }

        BRACKEN_BRACKEN ( ch_input_for_bracken.report, ch_input_for_bracken.db )
        ch_versions     = ch_versions.mix( BRACKEN_BRACKEN.out.versions.first() )
        ch_raw_profiles = ch_raw_profiles.mix( BRACKEN_BRACKEN.out.reports )

    }

    if ( params.run_centrifuge ) {

        ch_input_for_centrifuge = ch_input_for_profiling.centrifuge
            .filter {
                if (it[0].is_fasta) log.warn "[nf-core/taxprofiler] Centrifuge currently does not accept FASTA files as input. Skipping Centrifuge for sample ${it[0].id}."
                !it[0].is_fasta
            }
            .multiMap {
                it ->
                    reads: [ it[0] + it[2], it[1] ]
                    db: it[3]
            }

        // A single user flag deliberately governs all three 'save' options
        // (unaligned reads, aligned reads, SAM output).
        CENTRIFUGE_CENTRIFUGE ( ch_input_for_centrifuge.reads, ch_input_for_centrifuge.db, params.centrifuge_save_reads, params.centrifuge_save_reads, params.centrifuge_save_reads )
        CENTRIFUGE_KREPORT ( CENTRIFUGE_CENTRIFUGE.out.report, ch_input_for_centrifuge.db )
        // Collect versions from both modules (CENTRIFUGE_KREPORT was previously missing).
        ch_versions            = ch_versions.mix( CENTRIFUGE_CENTRIFUGE.out.versions.first(), CENTRIFUGE_KREPORT.out.versions.first() )
        ch_raw_classifications = ch_raw_classifications.mix( CENTRIFUGE_CENTRIFUGE.out.results )
        ch_raw_profiles        = ch_raw_profiles.mix( CENTRIFUGE_KREPORT.out.kreport )
        ch_multiqc_files       = ch_multiqc_files.mix( CENTRIFUGE_KREPORT.out.kreport )

    }

    if ( params.run_metaphlan3 ) {

        ch_input_for_metaphlan3 = ch_input_for_profiling.metaphlan3
            .filter {
                if (it[0].is_fasta) log.warn "[nf-core/taxprofiler] MetaPhlAn3 currently does not accept FASTA files as input. Skipping MetaPhlAn3 for sample ${it[0].id}."
                !it[0].is_fasta
            }
            .multiMap {
                it ->
                    reads: [it[0] + it[2], it[1]]
                    db: it[3]
            }

        METAPHLAN3_METAPHLAN3 ( ch_input_for_metaphlan3.reads, ch_input_for_metaphlan3.db )
        ch_versions     = ch_versions.mix( METAPHLAN3_METAPHLAN3.out.versions.first() )
        ch_raw_profiles = ch_raw_profiles.mix( METAPHLAN3_METAPHLAN3.out.profile )

    }

    if ( params.run_kaiju ) {

        ch_input_for_kaiju = ch_input_for_profiling.kaiju
            .multiMap {
                it ->
                    reads: [it[0] + it[2], it[1]]
                    db: it[3]
            }

        KAIJU_KAIJU ( ch_input_for_kaiju.reads, ch_input_for_kaiju.db )
        ch_versions            = ch_versions.mix( KAIJU_KAIJU.out.versions.first() )
        ch_raw_classifications = ch_raw_classifications.mix( KAIJU_KAIJU.out.results )

    }

    if ( params.run_diamond ) {

        ch_input_for_diamond = ch_input_for_profiling.diamond
            .multiMap {
                it ->
                    reads: [it[0] + it[2], it[1]]
                    db: it[3]
            }

        // diamond only accepts single output file specification, therefore
        // this will replace output file!
        ch_diamond_reads_format = params.diamond_save_reads ? 'sam' : params.diamond_output_format

        DIAMOND_BLASTX ( ch_input_for_diamond.reads, ch_input_for_diamond.db, ch_diamond_reads_format , [] )
        ch_versions      = ch_versions.mix( DIAMOND_BLASTX.out.versions.first() )
        ch_raw_profiles  = ch_raw_profiles.mix( DIAMOND_BLASTX.out.tsv )
        ch_multiqc_files = ch_multiqc_files.mix( DIAMOND_BLASTX.out.log )

    }

    if ( params.run_motus ) {

        ch_input_for_motus = ch_input_for_profiling.motus
            .filter {
                if (it[0].is_fasta) log.warn "[nf-core/taxprofiler] mOTUs currently does not accept FASTA files as input. Skipping mOTUs for sample ${it[0].id}."
                !it[0].is_fasta
            }
            .multiMap {
                it ->
                    reads: [it[0] + it[2], it[1]]
                    db: it[3]
            }

        MOTUS_PROFILE ( ch_input_for_motus.reads, ch_input_for_motus.db )
        ch_versions      = ch_versions.mix( MOTUS_PROFILE.out.versions.first() )
        ch_raw_profiles  = ch_raw_profiles.mix( MOTUS_PROFILE.out.out )
        ch_multiqc_files = ch_multiqc_files.mix( MOTUS_PROFILE.out.log )

    }

    if ( params.run_krakenuniq ) {

        ch_input_for_krakenuniq = ch_input_for_profiling.krakenuniq
            .map {
                meta, reads, db_meta, db ->
                    // Reduce sample meta to database name + read layout so that all
                    // samples sharing a database (and layout) group into one run,
                    // as database loading dominates KrakenUniq runtime.
                    [[id: db_meta.db_name, single_end: meta.single_end], reads, db_meta, db]
            }
            .groupTuple(by: [0,2,3])
            .multiMap {
                single_meta, reads, db_meta, db ->
                    reads: [ single_meta + db_meta, reads.flatten() ]
                    db: db
            }

        // Hardcode to _always_ produce the report file (which is our basic output, and goes into downstream steps)
        KRAKENUNIQ_PRELOADEDKRAKENUNIQ ( ch_input_for_krakenuniq.reads, ch_input_for_krakenuniq.db, params.krakenuniq_ram_chunk_size, params.krakenuniq_save_reads, true, params.krakenuniq_save_readclassifications )
        ch_multiqc_files       = ch_multiqc_files.mix( KRAKENUNIQ_PRELOADEDKRAKENUNIQ.out.report )
        ch_versions            = ch_versions.mix( KRAKENUNIQ_PRELOADEDKRAKENUNIQ.out.versions.first() )
        ch_raw_classifications = ch_raw_classifications.mix( KRAKENUNIQ_PRELOADEDKRAKENUNIQ.out.classified_assignment )
        ch_raw_profiles        = ch_raw_profiles.mix( KRAKENUNIQ_PRELOADEDKRAKENUNIQ.out.report )

    }

    emit:
    classifications = ch_raw_classifications // channel: [ val(meta), path ] - per-read classification output
    profiles        = ch_raw_profiles        // channel: [ val(meta), [ reads ] ] - should be text files or biom
    versions        = ch_versions            // channel: [ versions.yml ]
    motus_version   = params.run_motus ? MOTUS_PROFILE.out.versions.first() : Channel.empty()
    mqc             = ch_multiqc_files
}