diff --git a/conf/modules.config b/conf/modules.config
index f85df30..d9e9125 100644
--- a/conf/modules.config
+++ b/conf/modules.config
@@ -226,7 +226,7 @@ process {
             pattern: '*.txt'
         ]
         ext.args = { "${meta.db_params}" }
-        ext.prefix = { "${meta.id}-${meta.run_accession}-${meta.db_name}" }
+        ext.prefix = params.perform_runmerging ? { "${meta.id}-${meta.db_name}" } : { "${meta.id}-${meta.run_accession}-${meta.db_name}" }
     }
 
     withName: CUSTOM_DUMPSOFTWAREVERSIONS {
@@ -252,6 +252,6 @@ process {
             pattern: '*.tsv'
         ]
         ext.args = { "${meta.db_params}" }
-        ext.prefix = { "${meta.id}-${meta.run_accession}-${meta.db_name}" }
+        ext.prefix = params.perform_runmerging ? { "${meta.id}-${meta.db_name}" } : { "${meta.id}-${meta.run_accession}-${meta.db_name}" }
     }
 }
diff --git a/conf/test.config b/conf/test.config
index 884ff32..107beb5 100644
--- a/conf/test.config
+++ b/conf/test.config
@@ -24,14 +24,14 @@ params {
     // TODO nf-core: Give any required params for the test so that command line flags are not needed
     input = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/samplesheet.csv'
     databases = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/database.csv'
-    run_kraken2 = true
-    run_malt = true
-    run_metaphlan3 = true
-    run_centrifuge = true
     perform_shortread_clipmerge = true
     perform_longread_clip = false
     perform_shortread_complexityfilter = true
     perform_shortread_hostremoval = true
     shortread_hostremoval_reference = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/homo_sapiens/genome/genome.fasta'
     run_kaiju = true
+    run_kraken2 = true
+    run_malt = true
+    run_metaphlan3 = true
+    run_centrifuge = true
 }
diff --git a/docs/usage.md b/docs/usage.md
index 239a55d..2b6937b 100644
--- a/docs/usage.md
+++ b/docs/usage.md
@@ -86,6 +86,11 @@ Column specifications are as follows:
 
 > 💡 You can also specify the same database directory/file twice (ensuring unique `db_name`s) and specify different parameters for each database to compare the effect of different parameters during profiling.
 
+## Profilers
+
+- `kaiju`
+  - The file `nodes.dmp` must be inside the database directory that is specified to `--databases` file
+
 ## Running the pipeline
 
 The typical command for running the pipeline is as follows:
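
For reference, below is a minimal Groovy sketch (not part of the changeset) of how the conditional `ext.prefix` closures added in `conf/modules.config` are intended to resolve. The `meta` map values and the local `perform_runmerging` variable are hypothetical stand-ins for the pipeline's `meta` and `params.perform_runmerging`; the sketch only illustrates the ternary-of-closures pattern, not Nextflow's actual evaluation of `ext.prefix`.

```groovy
// Hypothetical meta map, mirroring the keys used by the ext.prefix closures in the diff.
def meta = [id: 'sample1', run_accession: 'ERR000001', db_name: 'testdb']

// Stand-in for params.perform_runmerging.
def perform_runmerging = true

// Same pattern as in conf/modules.config: the ternary selects one of two closures,
// which is only evaluated later against meta.
def prefix = perform_runmerging ? { "${meta.id}-${meta.db_name}" } : { "${meta.id}-${meta.run_accession}-${meta.db_name}" }
assert prefix() == 'sample1-testdb'

// Without run merging, the run accession is kept so per-run outputs stay distinguishable.
perform_runmerging = false
prefix = perform_runmerging ? { "${meta.id}-${meta.db_name}" } : { "${meta.id}-${meta.run_accession}-${meta.db_name}" }
assert prefix() == 'sample1-ERR000001-testdb'
```

The design choice here is that when runs are merged per sample, `run_accession` no longer identifies the merged input, so it is dropped from the output file prefix; otherwise the original per-run naming is preserved.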