mirror of https://github.com/MillironX/taxprofiler.git (synced 2024-11-25 22:09:55 +00:00)
Add working basic test to begin

This commit is contained in:
  parent cd9e3c604c
  commit 81bfb629ca

5 changed files with 35 insertions and 9 deletions
.github/workflows/ci.yml (vendored)

@@ -48,3 +48,4 @@ jobs:
       # Remember that you can parallelise this by using strategy.matrix
       run: |
           nextflow run ${GITHUB_WORKSPACE} -profile test,docker --outdir ./results
+          # TODO Add test that runs with pre-downloaded and decompressed databases
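Note: the TODO above anticipates databases that arrive already decompressed. A hedged sketch of how the pipeline side could separate such entries so they bypass UNTAR; the channel name and tuple shape are assumptions, taken from the DB_CHECK/UNTAR additions below:

    // Sketch only: split database entries by whether they still need extraction.
    // DB_CHECK.out.dbs is assumed to emit [ db_meta, db_path ] tuples.
    ch_dbs_branched = DB_CHECK.out.dbs
        .branch { db_meta, db_path ->
            untar: db_path.toString().endsWith('.tar.gz')  // archives go to UNTAR
            ready: true                                    // pre-decompressed dirs pass through
        }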
conf/modules.config

@@ -26,6 +26,22 @@ process {
         ]
     }

+    withName: DATABASE_CHECK {
+        publishDir = [
+            path: { "${params.outdir}/pipeline_info" },
+            mode: params.publish_dir_mode,
+            saveAs: { filename -> filename.equals('versions.yml') ? null : filename }
+        ]
+    }
+
+    withName: UNTAR {
+        publishDir = [
+            path: { "${params.outdir}/databases" },
+            mode: params.publish_dir_mode,
+            saveAs: { filename -> filename.equals('versions.yml') ? null : filename }
+        ]
+    }
+
     withName: FASTQC {
         ext.args = '--quiet'
         ext.prefix = { "${meta.id}_${meta.run_accession}_raw" }
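Note: in publishDir, a saveAs closure that returns null tells Nextflow not to publish that file, which is how versions.yml is kept out of the results directory here. A minimal Groovy check of the closure's behaviour (filenames are illustrative):

    // Same closure as in the hunk above; returning null suppresses publishing.
    def saveAs = { filename -> filename.equals('versions.yml') ? null : filename }
    assert saveAs('versions.yml') == null                   // skipped
    assert saveAs('database.tar.gz') == 'database.tar.gz'   // published under its own name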
conf/test.config

@@ -22,6 +22,12 @@ params {
     // Input data
     // TODO nf-core: Specify the paths to your test data on nf-core/test-datasets
     // TODO nf-core: Give any required params for the test so that command line flags are not needed
     input = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/samplesheet.csv'
+    outdir = "./results"
+    // TODO replace with official once ready
+    databases = 'https://raw.githubusercontent.com/jfy133/nf-core-test-datasets/taxprofiler/database.csv'
+    run_kraken2 = true
+    run_malt = true
+    shortread_clipmerge = true

 }
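Note: with these defaults baked into the test profile, the CI command above (nextflow run ... -profile test,docker --outdir ./results) needs no extra flags. A sketch of how the new booleans gate pipeline steps; the shortread_clipmerge branch is visible later in this diff, while the run_kraken2/run_malt gating is an assumed analogue:

    // From the workflows/taxprofiler.nf hunk further down:
    if ( params.shortread_clipmerge ) {
        ch_shortreads_preprocessed = SHORTREAD_PREPROCESSING ( INPUT_CHECK.out.fastq ).reads
    }
    // Assumed analogous gating for the profilers (not shown in this diff):
    if ( params.run_kraken2 ) { /* invoke Kraken2 profiling */ }
    if ( params.run_malt )    { /* invoke MALT profiling */ }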
subworkflows/local/input_check.nf

@@ -30,7 +30,7 @@ workflow INPUT_CHECK {
         .set { nanopore }

     parsed_samplesheet.fasta
-        .map { create_fasta_channels(it) }
+        .map { create_fasta_channel(it) }
         .dump(tag: "fasta_channel_init")
         .set { fasta }

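Note: the map/dump/set chain above is the standard channel idiom here: map builds the [ meta, file ] tuple, dump prints it only when the pipeline runs with -dump-channels, and set names the resulting channel. A toy, self-contained illustration (row values invented):

    // Illustrative only; real rows come from the parsed samplesheet.
    Channel
        .of( [sample: '2612', fasta: 'sample.fasta'] )
        .map { row -> [ [id: row.sample], row.fasta ] }   // build [ meta, file ]
        .dump(tag: 'fasta_channel_init')                  // visible with -dump-channels
        .set { fasta }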
@@ -42,7 +42,7 @@ workflow INPUT_CHECK {
 }

 // Function to get list of [ meta, [ fastq_1, fastq_2 ] ]
-def create_fastq_channels(LinkedHashMap row) {
+def create_fastq_channel(LinkedHashMap row) {
     // create meta map
     def meta = [:]
     meta.id = row.sample
@@ -74,7 +74,7 @@ def create_fastq_channels(LinkedHashMap row) {
 }

 // Function to get list of [ meta, fasta ]
-def create_fasta_channels(LinkedHashMap row) {
+def create_fasta_channel(LinkedHashMap row) {
     def meta = [:]
     meta.id = row.sample
     meta.run_accession = row.run_accession
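Note: both helper renames (create_fastq_channels to create_fastq_channel, create_fasta_channels to create_fasta_channel) reflect that each call maps a single samplesheet row to a single tuple. A hedged sketch of the full fasta helper; only the meta assignments and the [ meta, fasta ] return shape are visible in this diff, the existence check and the column name fasta are assumptions:

    def create_fasta_channel(LinkedHashMap row) {
        def meta = [:]
        meta.id            = row.sample
        meta.run_accession = row.run_accession

        // Assumed guard, following the usual nf-core input-check pattern:
        if ( !file(row.fasta).exists() ) {
            exit 1, "ERROR: Please check input samplesheet -> FASTA file does not exist!\n${row.fasta}"
        }
        return [ meta, file(row.fasta) ]   // shape documented by the comment above
    }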
workflows/taxprofiler.nf

@@ -101,7 +101,9 @@ workflow TAXPROFILER {
     // PERFORM PREPROCESSING
     //
     if ( params.shortread_clipmerge ) {
-        SHORTREAD_PREPROCESSING ( INPUT_CHECK.out.fastq )
+        ch_shortreads_preprocessed = SHORTREAD_PREPROCESSING ( INPUT_CHECK.out.fastq ).reads
+    } else {
+        ch_shortreads_preprocessed = INPUT_CHECK.out.fastq
     }

     if ( params.longread_clip ) {
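Note: the change captures the subworkflow's named reads output into a channel either way, so downstream steps no longer reference SHORTREAD_PREPROCESSING.out directly. The same idiom written as a ternary, as an equivalent sketch:

    // Equivalent formulation of the if/else above (sketch, not from the diff):
    ch_shortreads_preprocessed = params.shortread_clipmerge
        ? SHORTREAD_PREPROCESSING ( INPUT_CHECK.out.fastq ).reads
        : INPUT_CHECK.out.fastq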
@@ -113,9 +115,10 @@ workflow TAXPROFILER {
     }

     //
-    // PERFORM RUN MERGING
+    // PERFORM SHORT READ RUN MERGING
+    // TODO: Check not necessary for long reads too?
     //
-    ch_processed_for_combine = SHORTREAD_PREPROCESSING.out.reads
+    ch_processed_for_combine = ch_shortreads_preprocessed
         .dump(tag: "prep_for_combine_grouping")
         .map {
             meta, reads ->
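Note: the switch to ch_shortreads_preprocessed matters because SHORTREAD_PREPROCESSING.out.reads does not exist when clip/merge is skipped. The map opens the per-run tuples for regrouping; a hedged sketch of the merging idea it feeds into (the run_accession: 'combined' value mirrors the DUMP example in the next hunk, everything else is an assumption):

    // Sketch only: group all runs of a sample, then emit one merged entry.
    ch_processed_for_combine
        .map { meta, reads -> [ meta.id, meta, reads ] }   // key on sample id
        .groupTuple(by: 0)
        .map { id, metas, reads ->
            [ [id: id, run_accession: 'combined'], reads.flatten() ]
        }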
@@ -140,7 +143,7 @@ workflow TAXPROFILER {
     // COMBINE READS WITH POSSIBLE DATABASES
     //

-    // output [DUMP: reads_plus_db] [['id':'2612', 'run_accession':'combined', 'instrument_platform':'ILLUMINA', 'single_end':1], <reads_path>/2612.merged.fastq.gz, ['tool':'malt', 'db_name':'mal95', 'db_params':'"-id 90"'], <db_path>/malt90]
+    // e.g. output [DUMP: reads_plus_db] [['id':'2612', 'run_accession':'combined', 'instrument_platform':'ILLUMINA', 'single_end':1], <reads_path>/2612.merged.fastq.gz, ['tool':'malt', 'db_name':'mal95', 'db_params':'"-id 90"'], <db_path>/malt90]
     ch_input_for_profiling = ch_reads_for_profiling
         .mix( ch_longreads_preprocessed )
         .combine(DB_CHECK.out.dbs)
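Note: combine with no join key yields the full cross product, which is what pairs every sample with every database here. A toy illustration (values invented):

    // Each reads item is paired with each db item:
    Channel.of( 'readsA', 'readsB' )
        .combine( Channel.of( 'db1', 'db2' ) )
        .view()
    // -> [readsA, db1], [readsA, db2], [readsB, db1], [readsB, db2]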
@@ -152,7 +155,7 @@ workflow TAXPROFILER {
     }

     //
-    // PREP PROFILER INPUT CHANNELS ON PER TOOL BASIS
+    // PREPARE PROFILER INPUT CHANNELS
     //

     // We groupTuple to have all samples in one channel for MALT as database
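Note: MALT loads its database once and can profile many samples in a single run, hence the groupTuple mentioned in the comment. A hedged sketch of that grouping (tuple shape taken from the DUMP example above; the filter/groupTuple details are assumptions, a real implementation would likely branch or multiMap per tool):

    // Sketch only: one MALT task per database, with all matching samples grouped.
    ch_input_for_malt = ch_input_for_profiling
        .filter { meta, reads, db_meta, db -> db_meta['tool'] == 'malt' }
        .groupTuple(by: 2)   // key on db_meta: emits [ metas, reads, db_meta, dbs ]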