diff --git a/conf/modules.config b/conf/modules.config index 28ede9d..30f057e 100644 --- a/conf/modules.config +++ b/conf/modules.config @@ -230,7 +230,7 @@ process { path: { "${params.outdir}/minimap2/index" }, mode: params.publish_dir_mode, enabled: params.save_hostremoval_index, - pattern: 'minimap2' + pattern: '*.mmi' ] } diff --git a/docs/usage.md b/docs/usage.md index 3a4e55c..d37ba60 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -64,6 +64,8 @@ ERR3201952,ERR3201952,OXFORD_NANOPORE,///fastq/ERR3201952.fastq.gz,, > ⚠️ Input FASTQ and FASTA files _must_ be gzipped +> ⚠️ While one can include both short-read and long-read data in one run, we recommend that you split these across _two_ pipeline runs and database sheets (see below). This will allow classification optimisation for each data type, and make MultiQC run-reports more readable (due to run statistics having very large number differences). + | Column | Description | | --------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `sample` | Unique sample name [required]. | diff --git a/subworkflows/local/db_check.nf b/subworkflows/local/db_check.nf index 7490b72..270f597 100644 --- a/subworkflows/local/db_check.nf +++ b/subworkflows/local/db_check.nf @@ -44,6 +44,7 @@ workflow DB_CHECK { .filter { params["run_${it[0]['tool']}"] } + UNTAR (ch_input_untar) ch_versions = ch_versions.mix(UNTAR.out.versions.first()) ch_final_dbs = ch_dbs_for_untar.skip.mix( UNTAR.out.untar ) diff --git a/subworkflows/local/profiling.nf b/subworkflows/local/profiling.nf index 4b4952c..1fe4cbb 100644 --- a/subworkflows/local/profiling.nf +++ b/subworkflows/local/profiling.nf @@ -203,6 +203,8 @@ workflow PROFILING { .filter{ if (it[0].is_fasta) log.warn "[nf-core/taxprofiler] MetaPhlAn3 currently does not accept FASTA files as input. 
Skipping MetaPhlAn3 for sample ${it[0].id}." !it[0].is_fasta + if (it[0].instrument_platform == 'OXFORD_NANOPORE') log.warn "[nf-core/taxprofiler] MetaPhlAn3 has not been evaluated for Nanopore data. Skipping MetaPhlAn3 for sample ${it[0].id}." + !it[0].is_fasta && it[0].instrument_platform != 'OXFORD_NANOPORE' } .multiMap { it -> @@ -277,14 +279,13 @@ workflow PROFILING { [[id: db_meta.db_name, single_end: meta.single_end], reads, db_meta, db] } .groupTuple(by: [0,2,3]) - .dump(tag: "krakenuniq_premultimap") .multiMap { single_meta, reads, db_meta, db -> reads: [ single_meta + db_meta, reads.flatten() ] db: db } // Hardcode to _always_ produce the report file (which is our basic otput, and goes into) - KRAKENUNIQ_PRELOADEDKRAKENUNIQ ( ch_input_for_krakenuniq.reads.dump(tag: "krakenuniq_input"), ch_input_for_krakenuniq.db.dump(tag: "krakenuniq_db"), params.krakenuniq_ram_chunk_size, params.krakenuniq_save_reads, true, params.krakenuniq_save_readclassifications ) + KRAKENUNIQ_PRELOADEDKRAKENUNIQ ( ch_input_for_krakenuniq.reads, ch_input_for_krakenuniq.db, params.krakenuniq_ram_chunk_size, params.krakenuniq_save_reads, true, params.krakenuniq_save_readclassifications ) ch_multiqc_files = ch_multiqc_files.mix( KRAKENUNIQ_PRELOADEDKRAKENUNIQ.out.report ) ch_versions = ch_versions.mix( KRAKENUNIQ_PRELOADEDKRAKENUNIQ.out.versions.first() ) ch_raw_classifications = ch_raw_classifications.mix( KRAKENUNIQ_PRELOADEDKRAKENUNIQ.out.classified_assignment )