Merge pull request #189 from nf-core/database-check

Adds (native groovy!) database check

Commit 578081df2e: 5 changed files with 49 additions and 53 deletions
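The change replaces the Python-backed DATABASE_CHECK process with database-sheet validation written directly in Groovy inside the DB_CHECK subworkflow: a between-row check that database names are unique per tool, plus per-row checks on column count, header names, supported profilers, and quoting in db_params. Going by the paths referenced in the hunks and standard nf-core layout, the five touched files are conf/modules.config, modules/local/database_check.nf (deleted), subworkflows/local/db_check.nf, subworkflows/local/profiling.nf, and workflows/taxprofiler.nf; the per-hunk file labels below follow that reading.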
conf/modules.config
@@ -12,14 +12,6 @@
 process {

-    withName: DATABASE_CHECK {
-        publishDir = [
-            path: { "${params.outdir}/pipeline_info" },
-            mode: params.publish_dir_mode,
-            saveAs: { filename -> filename.equals('versions.yml') ? null : filename }
-        ]
-    }
-
     withName: FASTQC {
         ext.args = '--quiet'
         ext.prefix = { "${meta.id}_${meta.run_accession}_raw" }
modules/local/database_check.nf (deleted)
@@ -1,29 +0,0 @@
-process DATABASE_CHECK {
-    tag "$databasesheet"
-    label 'process_single'
-
-    conda (params.enable_conda ? "conda-forge::python=3.8.3" : null)
-    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/python:3.8.3' :
-        'quay.io/biocontainers/python:3.8.3' }"
-
-    input:
-    path databasesheet
-
-    output:
-    path '*.csv'       , emit: csv
-    path "versions.yml", emit: versions
-
-    when:
-    task.ext.when == null || task.ext.when
-
-    script: // This script is bundled with the pipeline, in nf-core/taxprofiler/bin/
-    """
-    cat $databasesheet >> database_sheet.valid.csv
-
-    cat <<-END_VERSIONS > versions.yml
-    "${task.process}":
-        python: \$(python --version | sed 's/Python //g')
-    END_VERSIONS
-    """
-}
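Worth noting: the deleted process never actually validated the sheet's contents; its script just copied the CSV through to database_sheet.valid.csv with cat. Replacing it with the Groovy checks below also removes the Python container dependency for this step.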
subworkflows/local/db_check.nf
@@ -2,7 +2,6 @@
 // Check input samplesheet and get read channels
 //

-include { DATABASE_CHECK } from '../../modules/local/database_check'
 include { UNTAR } from '../../modules/nf-core/untar/main'

 workflow DB_CHECK {
@@ -10,15 +9,27 @@ workflow DB_CHECK {
     dbsheet // file: /path/to/dbsheet.csv

     main:
+    ch_versions = Channel.empty()

-    // TODO: make database sheet check
-    // Checks:
-    // 1) no duplicates,
-    // 2) args do not have quotes, e.g. just `,,` and NOT `,"",`
-    parsed_samplesheet = DATABASE_CHECK ( dbsheet )
-        .csv
+    // special check to check _between_ rows, for which we must group rows together
+    // note: this will run in parallel to within-row validity, but we can assume this will run faster thus will fail first
+    Channel.fromPath(dbsheet)
+        .splitCsv ( header:true, sep:',' )
+        .map {[it.tool, it.db_name] }
+        .groupTuple()
+        .map {
+            tool, db_name ->
+                def unique_names = db_name.unique(false)
+                if ( unique_names.size() < db_name.size() ) exit 1, "[nf-core/taxprofiler] ERROR: Each database for a tool must have a unique name, duplicate detected. Tool: ${tool}, Database name: ${unique_names}"
+        }
+
+    // normal checks for within-row validity, so can be moved to separate functions
+    parsed_samplesheet = Channel.fromPath(dbsheet)
         .splitCsv ( header:true, sep:',' )
-        .map { create_db_channels(it) }
+        .map {
+            validate_db_rows(it)
+            create_db_channels(it)
+        }

     ch_dbs_for_untar = parsed_samplesheet
         .branch {
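A minimal plain-Groovy sketch of the between-row check above, outside Nextflow's channel operators (the rows here are hypothetical stand-ins for the parsed sheet): group rows by tool, then compare each tool's name list against a de-duplicated copy, which is what the groupTuple() / unique(false) combination does in the subworkflow.

    // Hypothetical rows standing in for the parsed database sheet
    def rows = [
        [tool: 'kraken2', db_name: 'standard'],
        [tool: 'kraken2', db_name: 'standard'],  // duplicate name for the same tool -> error
        [tool: 'malt',    db_name: 'standard'],  // same name under a different tool is fine
    ]

    rows.groupBy { it.tool }.each { tool, entries ->
        def names = entries*.db_name
        // unique(false) returns a de-duplicated copy and leaves `names` untouched
        def uniqueNames = names.unique(false)
        if ( uniqueNames.size() < names.size() )
            System.err.println "ERROR: duplicate database name for tool ${tool}: ${names}"
    }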
@@ -29,12 +40,32 @@ workflow DB_CHECK {
     // TODO Filter to only run UNTAR on DBs of tools actually using?
     // TODO make optional whether to save
     UNTAR ( ch_dbs_for_untar.untar )
+    ch_versions = ch_versions.mix(UNTAR.out.versions.first())

     ch_final_dbs = ch_dbs_for_untar.skip.mix( UNTAR.out.untar )

     emit:
     dbs = ch_final_dbs // channel: [ val(meta), [ db ] ]
-    versions = DATABASE_CHECK.out.versions.mix(UNTAR.out.versions.first()) // channel: [ versions.yml ]
+    versions = ch_versions // channel: [ versions.yml ]
+}
+
+def validate_db_rows(LinkedHashMap row){
+
+    // check minimum number of columns
+    if (row.size() < 4) exit 1, "[nf-core/taxprofiler] ERROR: Invalid database input sheet - malformed row (e.g. missing column). See documentation for more information. Error in: ${row}"
+
+    // all columns there
+    def expected_headers = ['tool', 'db_name', 'db_params', 'db_path']
+    if ( !row.keySet().containsAll(expected_headers) ) exit 1, "[nf-core/taxprofiler] ERROR: Invalid database input sheet - malformed column names. Please check input CSV. Column names should be: ${expected_headers.join(", ")}"
+
+    // valid tools specified // TODO: finish list
+    def expected_tools = [ "bracken", "centrifuge", "diamond", "kaiju", "kraken2", "krakenuniq", "malt", "metaphlan3", "motus" ]
+    if ( !expected_tools.contains(row.tool) ) exit 1, "[nf-core/taxprofiler] ERROR: Invalid tool name. Please see documentation for all supported profilers. Error in: ${row}"
+
+    // detect quotes in params
+    if ( row.db_params.contains('"') ) exit 1, "[nf-core/taxprofiler] ERROR: Invalid database db_params entry. No quotes allowed. Error in: ${row}"
+    if ( row.db_params.contains("'") ) exit 1, "[nf-core/taxprofiler] ERROR: Invalid database db_params entry. No quotes allowed. Error in: ${row}"
+
 }

 def create_db_channels(LinkedHashMap row) {
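For reference, a hypothetical databases sheet that passes validate_db_rows: the four expected columns, one supported profiler per row, and no quotes in db_params (the names and paths are invented):

    tool,db_name,db_params,db_path
    kraken2,k2_standard,,/data/dbs/k2_standard
    malt,malt_refseq,-id 90,/data/dbs/malt_refseq

A row naming an unlisted tool, or wrapping its db_params in quotes, now aborts the run with one of the exit 1 messages above instead of failing later inside a downstream process.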
@@ -45,9 +76,11 @@ def create_db_channels(LinkedHashMap row) {

     def array = []
     if (!file(row.db_path, type: 'dir').exists()) {
-        exit 1, "ERROR: Please check input samplesheet -> database could not be found!\n${row.db_path}"
+        exit 1, "ERROR: Please check input samplesheet -> database path could not be found!\n${row.db_path}"
     }
     array = [ meta, file(row.db_path) ]

     return array
 }
+
+
subworkflows/local/profiling.nf
@@ -41,14 +41,14 @@ workflow PROFILING {
         }
         .combine(databases)
         .branch {
-            malt: it[2]['tool'] == 'malt'
-            kraken2: it[2]['tool'] == 'kraken2' || it[2]['tool'] == 'bracken' // to reuse the kraken module to produce the input data for bracken
-            metaphlan3: it[2]['tool'] == 'metaphlan3'
             centrifuge: it[2]['tool'] == 'centrifuge'
-            kaiju: it[2]['tool'] == 'kaiju'
             diamond: it[2]['tool'] == 'diamond'
-            motus: it[2]['tool'] == 'motus'
+            kaiju: it[2]['tool'] == 'kaiju'
+            kraken2: it[2]['tool'] == 'kraken2' || it[2]['tool'] == 'bracken' // to reuse the kraken module to produce the input data for bracken
             krakenuniq: it[2]['tool'] == 'krakenuniq'
+            malt: it[2]['tool'] == 'malt'
+            metaphlan3: it[2]['tool'] == 'metaphlan3'
+            motus: it[2]['tool'] == 'motus'
             unknown: true
         }
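One note on the reordering above: .branch routes each element to the first predicate that matches, so a pure alphabetical reshuffle is behavior-preserving only because every row names exactly one tool and the predicates are mutually exclusive (kraken2 and bracken intentionally share a branch). A toy sketch with made-up values:

    workflow {
        Channel.of('centrifuge', 'kraken2', 'malt')
            .branch {
                kraken2: it == 'kraken2'
                malt:    it == 'malt'
                other:   true           // fallback branch, like `unknown: true` above
            }
            .set { ch_tools }
        ch_tools.malt.view { "routed to malt: ${it}" }
    }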
workflows/taxprofiler.nf
@@ -25,7 +25,7 @@ if ( params.input ) {
     exit 1, "Input samplesheet, or PEP config and base directory not specified"
 }

-if (params.databases) { ch_databases = file(params.databases) } else { exit 1, 'Input database sheet not specified!' }
+if (params.databases) { ch_databases = file(params.databases, checkIfExists: true) } else { exit 1, 'Input database sheet not specified!' }

 if (params.shortread_qc_mergepairs && params.run_malt ) log.warn "[nf-core/taxprofiler] MALT does not accept uncollapsed paired-reads. Pairs will be profiled as separate files."
 if (params.shortread_qc_includeunmerged && !params.shortread_qc_mergepairs) exit 1, "ERROR: [nf-core/taxprofiler] cannot include unmerged reads when merging is not turned on. Please specify --shortread_qc_mergepairs"
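The added checkIfExists: true makes a bad --databases path fail at launch, before any process starts, instead of surfacing only when the sheet is first read. A minimal sketch with a made-up path:

    // file() with checkIfExists: true raises an error immediately when the path
    // is missing; without the option it returns a Path object regardless
    def sheet = file('/no/such/databases.csv', checkIfExists: true)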