Merge pull request #276 from nf-core/feat-batch
feat: introduce sample batches for krakenuniq
Commit c4ad03ff9f. 4 changed files with 23 additions and 10 deletions.
@@ -7,6 +7,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ### `Added`
 
+- [#276](https://github.com/nf-core/taxprofiler/pull/276) Implemented batching in the KrakenUniq samples processing. (added by @Midnighter)
 - [#272](https://github.com/nf-core/taxprofiler/pull/272) - Add saving of final 'analysis-ready-reads' to dedicated directory. (❤️ to @alexhbnr for reporting, added by @jfy133)
 
 ### `Fixed`
@@ -125,6 +125,7 @@ params {
     krakenuniq_ram_chunk_size           = '16G'
     krakenuniq_save_reads               = false // added directly to module in profiling.nf
     krakenuniq_save_readclassifications = false // added directly to module in profiling.nf
+    krakenuniq_batch_size               = 20
 
     // Bracken
     run_bracken                         = false
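The new `krakenuniq_batch_size` parameter sits alongside the existing KrakenUniq options and defaults to 20. A minimal sketch of overriding it via a user-supplied config file follows; the file name `custom.config` and the values shown are illustrative only, not recommendations:

    // custom.config (hypothetical): tune KrakenUniq batching for a large sample set
    params {
        krakenuniq_ram_chunk_size = '8G'  // load the database in smaller RAM chunks
        krakenuniq_batch_size     = 10    // classify 10 samples per KrakenUniq task
    }

The same override can also be passed through the standard Nextflow params mechanism, e.g. `nextflow run nf-core/taxprofiler -c custom.config ...` or `--krakenuniq_batch_size 10` on the command line.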
@@ -432,6 +432,13 @@
             "description": "Turn on saving of KrakenUniq per-read taxonomic assignment file",
             "help_text": "Save a text file that contains a list of each read that had a taxonomic assignment, with information on the specific taxonomic assignment that that read received.\n\n> Modifies tool parameter(s):\n> - krakenuniq: `--output`"
         },
+        "krakenuniq_batch_size": {
+            "type": "integer",
+            "default": 20,
+            "fa_icon": "far fa-window-restore",
+            "description": "Specify the number of samples for each KrakenUniq run",
+            "help_text": "Specify the batch size for KrakenUniq. The reference database for KrakenUniq is loaded into memory once per Nextflow process and then used to classify many samples. When you have many samples, a single KrakenUniq run can be rather slow. Alternatively, KrakenUniq runs can be split into 'batches' of samples, balancing shared use of the database across multiple samples against faster, parallelised KrakenUniq runs. This parameter determines how many samples are classified in each batch."
+        },
         "run_bracken": {
             "type": "boolean",
             "description": "Turn on Bracken (and the required Kraken2 prerequisite step).",
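The batching described in the help text boils down to Groovy's `List.collate`, which the subworkflow change below relies on to split the grouped samples. A small plain-Groovy sketch of the behaviour for the default batch size (the sample names and the count of 45 are made up for illustration):

    // Plain Groovy: how a batch size of 20 partitions a list of samples.
    def samples   = (1..45).collect { "sample_${it}" }
    def batchSize = 20
    def batches   = samples.collate(batchSize)
    assert batches.size()    == 3    // 20 + 20 + 5 samples
    assert batches[0].size() == 20   // full batch
    assert batches[2].size() == 5    // last batch keeps the remainder

Each inner list becomes one KrakenUniq task: the database is loaded three times rather than once, but the three batches can run in parallel.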
@@ -315,16 +315,20 @@ workflow PROFILING {
 
     if ( params.run_krakenuniq ) {
         ch_input_for_krakenuniq = ch_input_for_profiling.krakenuniq
             .map {
                 meta, reads, db_meta, db ->
                     [[id: db_meta.db_name, single_end: meta.single_end], reads, db_meta, db]
             }
             .groupTuple(by: [0,2,3])
-            .multiMap {
-                single_meta, reads, db_meta, db ->
-                    reads: [ single_meta + db_meta, reads.flatten() ]
-                    db: db
-            }
+            .flatMap { single_meta, reads, db_meta, db ->
+                def batches = reads.collate(params.krakenuniq_batch_size)
+                return batches.collect { batch -> [ single_meta + db_meta, batch.flatten(), db ]}
+            }
+            .multiMap {
+                meta, reads, db ->
+                    reads: [ meta, reads ]
+                    db: db
+            }
         // Hardcode to _always_ produce the report file (which is our basic output, and goes into)
         KRAKENUNIQ_PRELOADEDKRAKENUNIQ ( ch_input_for_krakenuniq.reads, ch_input_for_krakenuniq.db, params.krakenuniq_ram_chunk_size, params.krakenuniq_save_reads, true, params.krakenuniq_save_readclassifications )
         ch_multiqc_files = ch_multiqc_files.mix( KRAKENUNIQ_PRELOADEDKRAKENUNIQ.out.report )
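To see what the new `flatMap`/`multiMap` chain emits, here is a minimal standalone sketch, assuming toy metadata and paths: the sample IDs, file names, database name, and batch size of 2 are invented for illustration, whereas the real subworkflow takes these from `ch_input_for_profiling` and `params.krakenuniq_batch_size`.

    // Toy Nextflow script mirroring the batching logic of the hunk above.
    workflow {
        def batch_size = 2

        ch_batched = Channel
            .of(
                [ [id:'s1', single_end:true], 'r1.fq', [db_name:'refseq'], '/dbs/refseq' ],
                [ [id:'s2', single_end:true], 'r2.fq', [db_name:'refseq'], '/dbs/refseq' ],
                [ [id:'s3', single_end:true], 'r3.fq', [db_name:'refseq'], '/dbs/refseq' ]
            )
            // one grouping key per database, as in the real .map above
            .map { meta, reads, db_meta, db -> [ [id: db_meta.db_name, single_end: meta.single_end], reads, db_meta, db ] }
            .groupTuple(by: [0,2,3])
            // split each per-database group into batches of `batch_size` samples
            .flatMap { single_meta, reads, db_meta, db ->
                reads.collate(batch_size).collect { batch -> [ single_meta + db_meta, batch.flatten(), db ] }
            }
            // fan out into the two input channels the KrakenUniq module expects
            .multiMap { meta, reads, db ->
                reads: [ meta, reads ]
                db: db
            }

        ch_batched.reads.view { "reads: ${it}" }  // emits [r1.fq, r2.fq] then [r3.fq]
        ch_batched.db.view { "db: ${it}" }
    }

With a batch size of 2, the three samples that share one database become two KrakenUniq invocations instead of one, each still reusing the preloaded database for its whole batch.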