Mirror of https://github.com/MillironX/taxprofiler.git (synced 2024-11-22 11:19:54 +00:00)

Commit 3eb6a29fb0: Merge branch 'nf-core:dev' into update_output
30 changed files with 749 additions and 432 deletions
@@ -77,8 +77,6 @@ On release, automated continuous integration tests run the pipeline on a full-si
 nextflow run nf-core/taxprofiler --input samplesheet.csv --databases database.csv --outdir <OUTDIR> --run_<TOOL1> --run_<TOOL2> -profile <docker/singularity/podman/shifter/charliecloud/conda/institute>
 ```
 
-Note pipeline supports both CSV and PEP input sample sheets. Find out more [here](http://pep.databio.org/en/2.1.0/specification/).
-
 ## Documentation
 
 The nf-core/taxprofiler pipeline comes with documentation about the pipeline [usage](https://nf-co.re/taxprofiler/usage), [parameters](https://nf-co.re/taxprofiler/parameters) and [output](https://nf-co.re/taxprofiler/output).
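As an illustrative aside (not part of the diff): a filled-in version of the template command above could look like the following, where the profiler flags and the Docker profile are example choices only; `--run_kraken2` and `--run_metaphlan3` correspond to parameters referenced elsewhere in this changeset.

```
nextflow run nf-core/taxprofiler --input samplesheet.csv --databases database.csv --outdir results --run_kraken2 --run_metaphlan3 -profile docker
```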
@@ -19,11 +19,20 @@ custom_logo_title: "nf-core/taxprofiler"
 run_modules:
   - fastqc
   - adapterRemoval
+  - bbduk
+  - prinseqplusplus
   - fastp
+  - filtlong
   - bowtie2
+  - minimap2
   - samtools
   - kraken
+  - kaiju
+  - metaphlan
+  - diamond
   - malt
+  - motus
+  - porechop
   - custom_content
 
 #extra_fn_clean_exts:
@@ -36,16 +45,41 @@ top_modules:
       name: "FastQC (pre-Trimming)"
       path_filters:
         - "*raw_*fastqc.zip"
+  - "fastqc":
+      name: "Falco (pre-Trimming)"
+      path_filters:
+        - "*_raw_falco_*_report.html"
   - "fastp"
   - "adapterRemoval"
+  - "porechop"
   - "fastqc":
       name: "FastQC (post-Trimming)"
       path_filters:
-        - "*raw_*processed.zip"
+        - "*_processed_*fastqc.zip"
+  - "fastqc":
+      name: "Falco (post-Trimming)"
+      path_filters:
+        - "*_processed_falco_*_report.html"
+  - "bbduk"
+  - "prinseqplusplus"
+  - "filtlong"
+  - "bowtie2":
+      name: "bowtie2"
+  - "samtools":
+      name: "Samtools Stats"
   - "kraken":
       name: "Kraken"
       path_filters:
-        - "*.kraken2.report.txt"
+        - "*.kraken2.kraken2.report.txt"
+  - "kraken":
+      name: "Bracken"
+      anchor: "bracken"
+      target: "Bracken"
+      doi: "10.7717/peerj-cs.104"
+      info: "Estimates species abundances in metagenomics samples by probabilistically re-distributing reads in the taxonomic tree."
+      extra: "Note: plot title will say Kraken2 due to the first step of bracken producing the same output format as Kraken. Abundance information is currently not supported in MultiQC."
+      path_filters:
+        - "*.bracken.kraken2.report.txt"
   - "kraken":
       name: "Centrifuge"
       anchor: "centrifuge"
@@ -55,3 +89,171 @@ top_modules:
       extra: "Note: plot title will say Kraken2 due to Centrifuge producing the same output format as Kraken. If activated, see the actual Kraken2 results in the section above."
       path_filters:
         - "*.centrifuge.txt"
+  - "malt":
+      name: "MALT"
+  - "diamond"
+  - "kaiju":
+      name: "Kaiju"
+  - "motus"
+
+#It is not possible to set placement for custom kraken and centrifuge columns.
+
+table_columns_placement:
+  FastQC (pre-Trimming):
+    total_sequences: 100
+    avg_sequence_length: 110
+    percent_duplicates: 120
+    percent_gc: 130
+    percent_fails: 140
+  Falco (pre-Trimming):
+    total_sequences: 200
+    avg_sequence_length: 210
+    percent_duplicates: 220
+    percent_gc: 230
+    percent_fails: 240
+  fastp:
+    pct_adapter: 300
+    pct_surviving: 310
+    pct_duplication: 320
+    after_filtering_gc_content: 330
+    after_filtering_q30_rate: 340
+    after_filtering_q30_bases: 350
+  Adapter Removal:
+    aligned_total: 360
+    percent_aligned: 370
+    percent_collapsed: 380
+    percent_discarded: 390
+  FastQC (post-Trimming):
+    total_sequences: 400
+    avg_sequence_length: 410
+    percent_duplicates: 420
+    percent_gc: 430
+    percent_fails: 440
+  Falco (post-Trimming):
+    total_sequences: 500
+    avg_sequence_length: 510
+    percent_duplicates: 520
+    percent_gc: 530
+    percent_fails: 540
+  bowtie2:
+    overall_alignment_rate: 600
+  Samtools Stats:
+    raw_total_sequences: 700
+    reads_mapped: 710
+    reads_mapped_percent: 720
+    reads_properly_paired_percent: 730
+    non-primary_alignments: 740
+    reads_MQ0_percent: 750
+    error_rate: 760
+  MALT:
+    Num. of queries: 1000
+    Total reads: 1100
+    Mappability: 1200
+    Assig. Taxonomy: 1300
+    Taxonomic assignment success: 1400
+  Kaiju:
+    assigned: 2000
+    "% Assigned": 2100
+    "% Unclassified": 2200
+
+table_columns_visible:
+  FastQC (pre-Trimming):
+    total_sequences: True
+    avg_sequence_length: True
+    percent_duplicates: True
+    percent_gc: True
+    percent_fails: False
+  Falco (pre-Trimming):
+    total_sequences: True
+    avg_sequence_length: True
+    percent_duplicates: True
+    percent_gc: True
+    percent_fails: False
+  fastp:
+    pct_adapter: True
+    pct_surviving: True
+    pct_duplication: False
+    after_filtering_gc_content: False
+    after_filtering_q30_rate: False
+    after_filtering_q30_bases: False
+  Adapter Removal:
+    aligned_total: True
+    percent_aligned: True
+    percent_collapsed: True
+    percent_discarded: False
+  FastQC (post-Trimming):
+    total_sequences: True
+    avg_sequence_length: True
+    percent_duplicates: False
+    percent_gc: False
+    percent_fails: False
+  Falco (post-Trimming):
+    total_sequences: True
+    avg_sequence_length: True
+    percent_duplicates: False
+    percent_gc: False
+    percent_fails: False
+  bowtie2:
+    overall_alignment_rate: True
+  Samtools Stats:
+    raw_total_sequences: True
+    reads_mapped: True
+    reads_mapped_percent: True
+    reads_properly_paired_percent: False
+    non-primary_alignments: False
+    reads_MQ0_percent: False
+    error_rate: False
+  Kraken:
+    "% Unclassified": True
+    "% Top 5": False
+  Bracken:
+    "% Unclassified": True
+    "% Top 5": False
+  Centrifuge:
+    "% Unclassified": True
+    "% Top 5": False
+  MALT:
+    Num. of queries: True
+    Total reads: True
+    Mappability: True
+    Assig. Taxonomy: False
+    Taxonomic assignment success: True
+  Kaiju:
+    assigned: False
+    "% Assigned": False
+    "% Unclassified": True
+table_columns_name:
+  FastQC (pre-Trimming):
+    total_sequences: "Nr. Input Reads"
+    avg_sequence_length: "Length Input Reads"
+    percent_gc: "% GC Input Reads"
+    percent_duplicates: "% Dups Input Reads"
+    percent_fails: "% Failed Input Reads"
+  Falco (pre-Trimming):
+    total_sequences: "Nr. Input Reads"
+    avg_sequence_length: "Length Input Reads"
+    percent_gc: "% GC Input Reads"
+    percent_duplicates: "% Dups Input Reads"
+    percent_fails: "% Failed Input Reads"
+  FastQC (post-Trimming):
+    total_sequences: "Nr. Processed Reads"
+    avg_sequence_length: "Length Processed Reads"
+    percent_gc: "% GC Processed Reads"
+    percent_duplicates: "% Dups Processed Reads"
+    percent_fails: "% Failed Processed Reads"
+  Falco (post-Trimming):
+    total_sequences: "Nr. Processed Reads"
+    avg_sequence_length: "Length Processed Reads"
+    percent_gc: "% GC Processed Reads"
+    percent_duplicates: "% Dups Processed Reads"
+    percent_fails: "% Failed Processed Reads"
+  Samtools Stats:
+    raw_total_sequences: "Nr. Reads Into Mapping"
+    reads_mapped: "Nr. Mapped Reads"
+    reads_mapped_percent: "% Mapped Reads"
+
+extra_fn_clean_exts:
+  - ".kraken2.kraken2.report.txt"
+  - ".centrifuge.txt"
+  - ".bracken.kraken2.report.txt"
+  - ".settings"
@@ -1,55 +0,0 @@
-description: A schema for validation of samplesheet.csv for taxprofiler pipeline.
-imports:
-  - https://schema.databio.org/pep/2.1.0.yaml
-properties:
-  samples:
-    type: array
-    items:
-      type: object
-      properties:
-        sample:
-          type: string
-          description: "Sample identifier."
-          pattern: "^\\S*$"
-        run_accession:
-          type: string
-          description: "Run accession number."
-        instrument_platform:
-          type: string
-          description: "Name of the platform that sequenced the samples."
-          enum:
-            [
-              "ABI_SOLID",
-              "BGISEQ",
-              "CAPILLARY",
-              "COMPLETE_GENOMICS",
-              "DNBSEQ",
-              "HELICOS",
-              "ILLUMINA",
-              "ION_TORRENT",
-              "LS454",
-              "OXFORD_NANOPORE",
-              "PACBIO_SMRT",
-            ]
-        fastq1:
-          type: ["string", "null"]
-          description: "Optional FASTQ file for read 1 of paired-end sequenced libraries."
-          pattern: "^[\\S]+.(fq\\.gz|fastq\\.gz)$"
-        fastq2:
-          type: ["string", "null"]
-          description: "Optional FASTQ file for read 2 of paired-end sequenced libraries."
-          pattern: "^[\\S]+.(fq\\.gz|fastq\\.gz)$"
-        fasta:
-          type: ["string", "null"]
-          description: "Optional FASTA file."
-          pattern: "^[\\S]+.(fa\\.gz|fasta\\.gz)$"
-      required:
-        - sample
-        - run_accession
-        - instrument_platform
-      files:
-        - fastq1
-        - fastq2
-        - fasta
-required:
-  - samples
233  bin/check_samplesheet.py  Executable file
@@ -0,0 +1,233 @@
+#!/usr/bin/env python
+
+from distutils import extension
+import os
+import sys
+import errno
+import argparse
+
+
+def parse_args(args=None):
+    Description = "Reformat nf-core/taxprofiler samplesheet file and check its contents."
+
+    Epilog = "Example usage: python check_samplesheet.py <FILE_IN> <FILE_OUT>"
+
+    parser = argparse.ArgumentParser(description=Description, epilog=Epilog)
+    parser.add_argument("FILE_IN", help="Input samplesheet file.")
+    parser.add_argument("FILE_OUT", help="Output file.")
+    return parser.parse_args(args)
+
+
+def make_dir(path):
+    if len(path) > 0:
+        try:
+            os.makedirs(path)
+        except OSError as exception:
+            if exception.errno != errno.EEXIST:
+                raise exception
+
+
+def print_error(error, context="Line", context_str=""):
+    error_str = "ERROR: Please check samplesheet -> {}".format(error)
+    if context != "" and context_str != "":
+        error_str = "ERROR: Please check samplesheet -> {}\n{}: '{}'".format(
+            error, context.strip(), context_str.strip()
+        )
+    print(error_str)
+    sys.exit(1)
+
+
+def check_samplesheet(file_in, file_out):
+    """
+    This function checks that the samplesheet follows the following structure:
+
+    sample,run_accession,instrument_platform,fastq_1,fastq_2,fasta
+    2611,ERR5766174,ILLUMINA,,,ERX5474930_ERR5766174_1.fa.gz
+    2612,ERR5766176,ILLUMINA,ERX5474932_ERR5766176_1.fastq.gz,ERX5474932_ERR5766176_2.fastq.gz,
+    2612,ERR5766174,ILLUMINA,ERX5474936_ERR5766180_1.fastq.gz,,
+    2613,ERR5766181,ILLUMINA,ERX5474937_ERR5766181_1.fastq.gz,ERX5474937_ERR5766181_2.fastq.gz,
+    """
+
+    FQ_EXTENSIONS = (".fq.gz", ".fastq.gz")
+    FA_EXTENSIONS = (
+        ".fa.gz",
+        ".fasta.gz",
+        ".fna.gz",
+        ".fas.gz",
+    )
+    INSTRUMENT_PLATFORMS = [
+        "ABI_SOLID",
+        "BGISEQ",
+        "CAPILLARY",
+        "COMPLETE_GENOMICS",
+        "DNBSEQ",
+        "HELICOS",
+        "ILLUMINA",
+        "ION_TORRENT",
+        "LS454",
+        "OXFORD_NANOPORE",
+        "PACBIO_SMRT",
+    ]
+
+    sample_mapping_dict = {}
+    with open(file_in, "r") as fin:
+
+        ## Check header
+        MIN_COLS = 4
+        HEADER = [
+            "sample",
+            "run_accession",
+            "instrument_platform",
+            "fastq_1",
+            "fastq_2",
+            "fasta",
+        ]
+        header = [x.strip('"') for x in fin.readline().strip().split(",")]
+
+        ## Check for missing mandatory columns
+        missing_columns = list(set(HEADER) - set(header))
+        if len(missing_columns) > 0:
+            print(
+                "ERROR: Missing required column header -> {}. Note some columns can otherwise be empty. See pipeline documentation (https://nf-co.re/taxprofiler/usage).".format(
+                    ",".join(missing_columns)
+                )
+            )
+            sys.exit(1)
+
+        ## Find locations of mandatory columns
+        header_locs = {}
+        for i in HEADER:
+            header_locs[i] = header.index(i)
+
+        ## Check sample entries
+        for line in fin:
+
+            ## Pull out only relevant columns for downstream checking
+            line_parsed = [x.strip().strip('"') for x in line.strip().split(",")]
+
+            # Check valid number of columns per row
+            if len(line_parsed) < len(HEADER):
+                print_error(
+                    "Invalid number of columns (minimum = {})!".format(len(HEADER)),
+                    "Line",
+                    line,
+                )
+            num_cols = len([x for x in line_parsed if x])
+            if num_cols < MIN_COLS:
+                print_error(
+                    "Invalid number of populated columns (minimum = {})!".format(MIN_COLS),
+                    "Line",
+                    line,
+                )
+
+            lspl = [line_parsed[i] for i in header_locs.values()]
+
+            ## Check sample name entries
+
+            (
+                sample,
+                run_accession,
+                instrument_platform,
+                fastq_1,
+                fastq_2,
+                fasta,
+            ) = lspl[: len(HEADER)]
+            sample = sample.replace(" ", "_")
+            if not sample:
+                print_error("Sample entry has not been specified!", "Line", line)
+
+            ## Check FastQ file extension
+            for fastq in [fastq_1, fastq_2]:
+                if fastq:
+                    if fastq.find(" ") != -1:
+                        print_error("FastQ file contains spaces!", "Line", line)
+                    if not fastq.endswith(FQ_EXTENSIONS):
+                        print_error(
+                            f"FastQ file does not have extension {' or '.join(list(FQ_EXTENSIONS))} !",
+                            "Line",
+                            line,
+                        )
+            if fasta:
+                if fasta.find(" ") != -1:
+                    print_error("FastA file contains spaces!", "Line", line)
+                if not fasta.endswith(FA_EXTENSIONS):
+                    print_error(
+                        f"FastA file does not have extension {' or '.join(list(FA_EXTENSIONS))}!",
+                        "Line",
+                        line,
+                    )
+            sample_info = []
+
+            # Check run_accession
+            if not run_accession:
+                print_error("Run accession has not been specified!", "Line", line)
+            else:
+                sample_info.append(run_accession)
+
+            # Check instrument_platform
+            if not instrument_platform:
+                print_error("Instrument platform has not been specified!", "Line", line)
+            else:
+                if instrument_platform not in INSTRUMENT_PLATFORMS:
+                    print_error(
+                        f"Instrument platform {instrument_platform} is not supported! "
+                        f"List of supported platforms {', '.join(INSTRUMENT_PLATFORMS)}",
+                        "Line",
+                        line,
+                    )
+                sample_info.append(instrument_platform)
+
+            ## Auto-detect paired-end/single-end
+            if sample and fastq_1 and fastq_2:  ## Paired-end short reads
+                sample_info.extend(["0", fastq_1, fastq_2, fasta])
+            elif sample and fastq_1 and not fastq_2:  ## Single-end short/long fastq reads
+                sample_info.extend(["1", fastq_1, fastq_2, fasta])
+            elif sample and fasta and not fastq_1 and not fastq_2:  ## Single-end long reads
+                sample_info.extend(["1", fastq_1, fastq_2, fasta])
+            elif fasta and (fastq_1 or fastq_2):
+                print_error(
+                    "FastQ and FastA files cannot be specified together in the same library!",
+                    "Line",
+                    line,
+                )
+            else:
+                print_error("Invalid combination of columns provided!", "Line", line)
+
+            ## Create sample mapping dictionary = { sample: [ run_accession, instrument_platform, single_end, fastq_1, fastq_2 , fasta ] }
+            if sample not in sample_mapping_dict:
+                sample_mapping_dict[sample] = [sample_info]
+            else:
+                if sample_info in sample_mapping_dict[sample]:
+                    print_error("Samplesheet contains duplicate rows!", "Line", line)
+                else:
+                    sample_mapping_dict[sample].append(sample_info)
+
+    ## Write validated samplesheet with appropriate columns
+    HEADER_OUT = [
+        "sample",
+        "run_accession",
+        "instrument_platform",
+        "single_end",
+        "fastq_1",
+        "fastq_2",
+        "fasta",
+    ]
+    if len(sample_mapping_dict) > 0:
+        out_dir = os.path.dirname(file_out)
+        make_dir(out_dir)
+        with open(file_out, "w") as fout:
+            fout.write(",".join(HEADER_OUT) + "\n")
+            for sample in sorted(sample_mapping_dict.keys()):
+                for idx, val in enumerate(sample_mapping_dict[sample]):
+                    fout.write(f"{sample},{','.join(val)}\n")
+    else:
+        print_error("No entries to process!", "Samplesheet: {}".format(file_in))
+
+
+def main(args=None):
+    args = parse_args(args)
+    check_samplesheet(args.FILE_IN, args.FILE_OUT)
+
+
+if __name__ == "__main__":
+    sys.exit(main())
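As a usage sketch (not part of the diff), the checker is invoked with an input samplesheet and an output path, mirroring the script's own Epilog and the way the SAMPLESHEET_CHECK module added later in this commit calls it; the file names here are placeholders.

```
python bin/check_samplesheet.py samplesheet.csv samplesheet.valid.csv
```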
@@ -12,14 +12,6 @@
 process {
 
-    withName: DATABASE_CHECK {
-        publishDir = [
-            path: { "${params.outdir}/pipeline_info" },
-            mode: params.publish_dir_mode,
-            saveAs: { filename -> filename.equals('versions.yml') ? null : filename }
-        ]
-    }
-
     withName: FASTQC {
         ext.args = '--quiet'
         ext.prefix = { "${meta.id}_${meta.run_accession}_raw" }
@@ -41,7 +33,7 @@ process {
     }
 
     withName: FALCO {
-        ext.prefix = { "${meta.id}_${meta.run_accession}_raw" }
+        ext.prefix = { "${meta.id}_${meta.run_accession}_raw_falco" }
         publishDir = [
             path: { "${params.outdir}/falco/raw" },
             mode: params.publish_dir_mode,
@@ -50,7 +42,7 @@ process {
     }
 
    withName: FALCO_PROCESSED {
-        ext.prefix = { "${meta.id}_${meta.run_accession}_processed" }
+        ext.prefix = { "${meta.id}_${meta.run_accession}_processed_falco" }
         publishDir = [
             path: { "${params.outdir}/falco/processed" },
             mode: params.publish_dir_mode,
@@ -69,10 +61,17 @@ process {
         ].join(' ').trim()
         ext.prefix = { "${meta.id}_${meta.run_accession}" }
         publishDir = [
-            path: { "${params.outdir}/fastp" },
-            mode: params.publish_dir_mode,
-            pattern: '*.fastq.gz',
-            enabled: params.save_preprocessed_reads
+            [
+                path: { "${params.outdir}/fastp" },
+                mode: params.publish_dir_mode,
+                pattern: '*.fastq.gz',
+                enabled: params.save_preprocessed_reads
+            ],
+            [
+                path: { "${params.outdir}/fastp" },
+                mode: params.publish_dir_mode,
+                pattern: '*.{log,html,json}'
+            ]
         ]
     }
 
@@ -90,10 +89,17 @@ process {
         ].join(' ').trim()
         ext.prefix = { "${meta.id}_${meta.run_accession}" }
         publishDir = [
-            path: { "${params.outdir}/fastp" },
-            mode: params.publish_dir_mode,
-            pattern: '*.fastq.gz',
-            enabled: params.save_preprocessed_reads
+            [
+                path: { "${params.outdir}/fastp" },
+                mode: params.publish_dir_mode,
+                pattern: '*.fastq.gz',
+                enabled: params.save_preprocessed_reads
+            ],
+            [
+                path: { "${params.outdir}/fastp" },
+                mode: params.publish_dir_mode,
+                pattern: '*.{log,html,json}'
+            ]
         ]
     }
 
@@ -106,10 +112,17 @@ process {
         ].join(' ').trim()
         ext.prefix = { "${meta.id}_${meta.run_accession}" }
         publishDir = [
-            path: { "${params.outdir}/adapterremoval" },
-            mode: params.publish_dir_mode,
-            pattern: '*.fastq.gz',
-            enabled: params.save_preprocessed_reads
+            [
+                path: { "${params.outdir}/adapterremoval" },
+                mode: params.publish_dir_mode,
+                pattern: '*.fastq.gz',
+                enabled: params.save_preprocessed_reads
+            ],
+            [
+                path: { "${params.outdir}/adapterremoval" },
+                mode: params.publish_dir_mode,
+                pattern: '*.settings'
+            ]
         ]
     }
 
@@ -125,20 +138,34 @@ process {
         ].join(' ').trim()
         ext.prefix = { "${meta.id}_${meta.run_accession}" }
         publishDir = [
-            path: { "${params.outdir}/adapterremoval" },
-            mode: params.publish_dir_mode,
-            pattern: '*.fastq.gz',
-            enabled: params.save_preprocessed_reads
+            [
+                path: { "${params.outdir}/adapterremoval" },
+                mode: params.publish_dir_mode,
+                pattern: '*.fastq.gz',
+                enabled: params.save_preprocessed_reads
+            ],
+            [
+                path: { "${params.outdir}/adapterremoval" },
+                mode: params.publish_dir_mode,
+                pattern: '*.settings'
+            ]
         ]
     }
 
     withName: PORECHOP_PORECHOP {
         ext.prefix = { "${meta.id}_${meta.run_accession}" }
         publishDir = [
-            path: { "${params.outdir}/porechop" },
-            mode: params.publish_dir_mode,
-            pattern: '*.fastq.gz',
-            enabled: params.save_preprocessed_reads
+            [
+                path: { "${params.outdir}/porechop" },
+                mode: params.publish_dir_mode,
+                pattern: '*.fastq.gz',
+                enabled: params.save_preprocessed_reads
+            ],
+            [
+                path: { "${params.outdir}/porechop" },
+                mode: params.publish_dir_mode,
+                pattern: '*.log'
+            ]
         ]
     }
 
@@ -151,10 +178,17 @@ process {
         .join(' ').trim()
         ext.prefix = { "${meta.id}_${meta.run_accession}_filtered" }
         publishDir = [
-            path: { "${params.outdir}/filtlong" },
-            mode: params.publish_dir_mode,
-            pattern: '*.{fastq.gz,log}',
-            enabled: params.save_preprocessed_reads
+            [
+                path: { "${params.outdir}/filtlong" },
+                mode: params.publish_dir_mode,
+                pattern: '*.fastq.gz',
+                enabled: params.save_preprocessed_reads
+            ],
+            [
+                path: { "${params.outdir}/filtlong" },
+                mode: params.publish_dir_mode,
+                pattern: '*.log'
+            ]
         ]
     }
 
@@ -171,21 +205,21 @@ process {
         ext.prefix = { "${meta.id}_${meta.run_accession}" }
         publishDir = [
             [
                 path: { "${params.outdir}/bowtie2/align" },
                 mode: params.publish_dir_mode,
                 pattern: '*.log'
             ],
             [
                 path: { "${params.outdir}/bowtie2/align" },
                 mode: params.publish_dir_mode,
                 enabled: params.save_hostremoval_mapped,
                 pattern: '*.bam'
             ],
             [
                 path: { "${params.outdir}/bowtie2/align" },
                 mode: params.publish_dir_mode,
                 enabled: params.save_hostremoval_unmapped,
                 pattern: '*.fastq.gz'
             ]
         ]
     }
@@ -248,10 +282,17 @@ process {
         ].join(' ').trim()
         ext.prefix = { "${meta.id}-${meta.run_accession}" }
         publishDir = [
-            path: { "${params.outdir}/bbduk/" },
-            mode: params.publish_dir_mode,
-            pattern: '*.{fastq.gz,log}',
-            enabled: params.save_complexityfiltered_reads
+            [
+                path: { "${params.outdir}/bbduk/" },
+                mode: params.publish_dir_mode,
+                pattern: '*.{fastq.gz,log}',
+                enabled: params.save_complexityfiltered_reads
+            ],
+            [
+                path: { "${params.outdir}/bbduk/" },
+                mode: params.publish_dir_mode,
+                pattern: '*.log'
+            ]
         ]
     }
 
@@ -263,10 +304,17 @@ process {
         ].join(' ').trim()
         ext.prefix = { "${meta.id}-${meta.run_accession}" }
         publishDir = [
-            path: { "${params.outdir}/prinseqplusplus/" },
-            mode: params.publish_dir_mode,
-            pattern: '*{_good_out.fastq.gz,_good_out_R1.fastq.gz,_good_out_R2.fastq.gz,log}',
-            enabled: params.save_complexityfiltered_reads
+            [
+                path: { "${params.outdir}/prinseqplusplus/" },
+                mode: params.publish_dir_mode,
+                pattern: '*{_good_out.fastq.gz,_good_out_R1.fastq.gz,_good_out_R2.fastq.gz}',
+                enabled: params.save_complexityfiltered_reads
+            ],
+            [
+                path: { "${params.outdir}/prinseqplusplus/" },
+                mode: params.publish_dir_mode,
+                pattern: '*.log'
+            ]
         ]
     }
 
@@ -303,7 +351,7 @@ process {
 
     withName: KRAKEN2_KRAKEN2 {
         ext.args = params.kraken2_save_minimizers ? { "${meta.db_params} --report-minimizer-data" } : { "${meta.db_params}" }
-        ext.prefix = params.perform_runmerging ? { "${meta.id}-${meta.db_name}" } : { "${meta.id}-${meta.run_accession}-${meta.db_name}" }
+        ext.prefix = params.perform_runmerging ? { meta.tool == "bracken" ? "${meta.id}-${meta.db_name}.bracken" : "${meta.id}-${meta.db_name}" } : { meta.tool == "bracken" ? "${meta.id}-${meta.run_accession}-${meta.db_name}.bracken" : "${meta.id}-${meta.run_accession}-${meta.db_name}" }
         publishDir = [
             path: { "${params.outdir}/kraken2/${meta.db_name}/" },
             mode: params.publish_dir_mode,
@@ -313,7 +361,7 @@ process {
 
     withName: BRACKEN_BRACKEN {
         errorStrategy = 'ignore'
-        ext.prefix = params.perform_runmerging ? { "${meta.id}-${meta.db_name}" } : { "${meta.id}-${meta.run_accession}-${meta.db_name}" }
+        ext.prefix = params.perform_runmerging ? { "${meta.id}-${meta.db_name}.bracken" } : { "${meta.id}-${meta.run_accession}-${meta.db_name}.bracken" }
         publishDir = [
             path: { "${params.outdir}/bracken/${meta.db_name}/" },
             mode: params.publish_dir_mode,
@@ -321,12 +369,21 @@ process {
         ]
     }
 
-    withName: KRAKENTOOLS_COMBINEKREPORTS {
+    withName: BRACKEN_COMBINEBRACKENOUTPUTS {
+        ext.prefix = { "bracken_${meta.id}_combined_reports" }
+        publishDir = [
+            path: { "${params.outdir}/bracken/" },
+            mode: params.publish_dir_mode,
+            pattern: '*.txt'
+        ]
+    }
+
+    withName: KRAKENTOOLS_COMBINEKREPORTS_KRAKEN {
         ext.prefix = { "kraken2_${meta.id}_combined_reports" }
         publishDir = [
             path: { "${params.outdir}/kraken2/" },
             mode: params.publish_dir_mode,
-            pattern: '*.{txt}'
+            pattern: '*.txt'
         ]
     }
 
@@ -495,12 +552,4 @@ process {
             saveAs: { filename -> filename.equals('versions.yml') ? null : filename }
         ]
     }
-
-    withName: 'EIDO_VALIDATE' {
-        ext.args = '--st-index sample'
-    }
-
-    withName: 'EIDO_CONVERT' {
-        ext.args = '--st-index sample'
-    }
 }
@@ -60,10 +60,4 @@ process {
     withName: MEGAN_RMA2INFO_KRONA {
         maxForks = 1
     }
-    withName: 'EIDO_VALIDATE' {
-        ext.args = '--st-index sample'
-    }
-    withName: 'EIDO_CONVERT' {
-        ext.args = '--st-index sample'
-    }
 }
@@ -63,10 +63,4 @@ process {
     withName: MEGAN_RMA2INFO_KRONA {
         maxForks = 1
     }
-    withName: 'EIDO_VALIDATE' {
-        ext.args = '--st-index sample'
-    }
-    withName: 'EIDO_CONVERT' {
-        ext.args = '--st-index sample'
-    }
 }
@@ -1,45 +0,0 @@
-params {
-    config_profile_name        = 'Test PEP profile'
-    config_profile_description = 'Minimal test dataset to check pipeline function with PEP file as an input.'
-
-    // Limit resources so that this can run on GitHub Actions
-    max_cpus   = 2
-    max_memory = '6.GB'
-    max_time   = '6.h'
-
-    // Input data
-    input = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/delete_me/pep/test_pep_format_files/config.yaml'
-    databases = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/database.csv'
-    perform_shortread_qc = true
-    perform_longread_qc = true
-    perform_shortread_complexityfilter = true
-    perform_shortread_hostremoval = true
-    perform_longread_hostremoval = true
-    perform_runmerging = true
-    hostremoval_reference = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/homo_sapiens/genome/genome.fasta'
-    run_kaiju = true
-    run_kraken2 = true
-    run_bracken = true
-    run_malt = true
-    run_metaphlan3 = true
-    run_centrifuge = true
-    run_diamond = true
-    run_motus = false
-    run_krona = true
-    krona_taxonomy_directory = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/sarscov2/metagenome/krona_taxonomy.tab'
-    malt_save_reads = true
-    kraken2_save_reads = true
-    centrifuge_save_reads = true
-    diamond_save_reads = true
-}
-
-
-process {
-    withName: MALT_RUN {
-        maxForks = 1
-        ext.args = { "-m ${params.malt_mode} -J-Xmx12G" }
-    }
-    withName: MEGAN_RMA2INFO {
-        maxForks = 1
-    }
-}
@@ -12,7 +12,7 @@
 
 nf-core/taxprofiler can accept as input raw or preprocessed single- or paired-end short-read (e.g. Illumina) FASTQ files, long-read FASTQ files (e.g. Oxford Nanopore), or FASTA sequences (available for a subset of profilers).
 
-> ⚠️ Input FASTQ files _must_ be gzipped, while FASTA files may optionally be uncompressed (although this is not recommended)
+> ⚠️ Input FASTQ and FASTA files _must_ be gzipped
 
 You will need to create a samplesheet with information about the samples you would like to analyse before running the pipeline. Use this parameter to specify its location. It has to be a comma-separated file with 6 columns, and a header row as shown in the examples below. Furthermore, nf-core/taxprofiler also requires a second comma-separated file of 3 columns with a header row as in the examples below.
 
@@ -22,10 +22,6 @@ This samplesheet is then specified on the command line as follows:
 --input '[path to samplesheet file]' --databases '[path to database sheet file]'
 ```
 
-Note pipeline supports both CSV and PEP input sample sheets. Find out more [here](http://pep.databio.org/en/2.1.0/specification/).
-When using PEP as an input, the `samplesheet.csv` must be placed in the same folder
-as `config.yaml` file. A path to `samplesheet.csv` within the config must be absolute.
-
 ### Multiple runs of the same sample
 
 The `sample` identifiers have to be the same when you have re-sequenced the same sample more than once e.g. to increase sequencing depth. The pipeline will concatenate different runs' FASTQ files of the same sample before performing profiling, when `--perform_runmerging` is supplied. Below is an example for the same sample sequenced across 3 lanes:
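The example rows referred to above fall outside this hunk. As an illustrative sketch only, using the column layout enforced by `bin/check_samplesheet.py` added in this commit (sample name, run accessions and file names are made up), such a three-lane samplesheet could look like:

```
sample,run_accession,instrument_platform,fastq_1,fastq_2,fasta
2612,run1,ILLUMINA,2612_run1_R1.fastq.gz,2612_run1_R2.fastq.gz,
2612,run2,ILLUMINA,2612_run2_R1.fastq.gz,2612_run2_R2.fastq.gz,
2612,run3,ILLUMINA,2612_run3_R1.fastq.gz,2612_run3_R2.fastq.gz,
```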
@@ -312,9 +308,6 @@ If `-profile` is not specified, the pipeline will run locally and expect all sof
 - `test`
   - A profile with a complete configuration for automated testing
   - Includes links to test data so needs no other parameters
-- `test_pep`
-  - A profile with a complete configuration for running a pipeline with PEP as input
-  - Includes links to test data so needs no other parameters
 - `docker`
   - A generic configuration profile to be used with [Docker](https://docker.com/)
 - `singularity`
@@ -81,7 +81,7 @@ class WorkflowMain {
 
         // Check input has been provided
         if (!params.input) {
-            log.error "Please provide an input samplesheet or PEP to the pipeline e.g. '--input samplesheet.csv'"
+            log.error "Please provide an input samplesheet to the pipeline e.g. '--input samplesheet.csv'"
             System.exit(1)
         }
     }
22  modules.json
@@ -30,6 +30,11 @@
             "git_sha": "8cab56516076b23c6f8eb1ac20ba4ce9692c85e1",
             "installed_by": ["modules"]
         },
+        "bracken/combinebrackenoutputs": {
+            "branch": "master",
+            "git_sha": "9c87d5fdad182590a370ea43a4ecebd200a6f6fb",
+            "installed_by": ["modules"]
+        },
         "cat/fastq": {
             "branch": "master",
             "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905",
@@ -55,20 +60,11 @@
             "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905",
             "installed_by": ["modules"]
         },
-        "eido/convert": {
-            "branch": "master",
-            "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905",
-            "installed_by": ["modules"]
-        },
-        "eido/validate": {
-            "branch": "master",
-            "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905",
-            "installed_by": ["modules"]
-        },
         "falco": {
             "branch": "master",
             "git_sha": "fc959214036403ad83efe7a41d43d0606c445cda",
-            "installed_by": ["modules"]
+            "installed_by": ["modules"],
+            "patch": "modules/nf-core/falco/falco.diff"
         },
         "fastp": {
             "branch": "master",
@@ -167,12 +163,12 @@
         },
         "motus/merge": {
             "branch": "master",
-            "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905",
+            "git_sha": "3fce766123e71e82fb384db7d07b59180baa9ee9",
             "installed_by": ["modules"]
         },
         "motus/profile": {
             "branch": "master",
-            "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905",
+            "git_sha": "3fce766123e71e82fb384db7d07b59180baa9ee9",
             "installed_by": ["modules"]
         },
         "multiqc": {
@@ -1,6 +1,5 @@
-process DATABASE_CHECK {
-    tag "$databasesheet"
-    label 'process_single'
+process SAMPLESHEET_CHECK {
+    tag "$samplesheet"
 
     conda (params.enable_conda ? "conda-forge::python=3.8.3" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
@@ -8,18 +7,17 @@ process DATABASE_CHECK {
         'quay.io/biocontainers/python:3.8.3' }"
 
     input:
-    path databasesheet
+    path samplesheet
 
     output:
     path '*.csv'       , emit: csv
     path "versions.yml", emit: versions
 
-    when:
-    task.ext.when == null || task.ext.when
-
     script: // This script is bundled with the pipeline, in nf-core/taxprofiler/bin/
     """
-    cat $databasesheet >> database_sheet.valid.csv
+    check_samplesheet.py \\
+        $samplesheet \\
+        samplesheet.valid.csv
 
     cat <<-END_VERSIONS > versions.yml
     "${task.process}":
37  modules/nf-core/bracken/combinebrackenoutputs/main.nf  generated  Normal file
@@ -0,0 +1,37 @@
+process BRACKEN_COMBINEBRACKENOUTPUTS {
+    tag "$meta.id"
+    label 'process_low'
+
+    conda (params.enable_conda ? "bioconda::bracken=2.7" : null)
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/bracken:2.7--py39hc16433a_0':
+        'quay.io/biocontainers/bracken:2.7--py39hc16433a_0' }"
+
+    input:
+    tuple val(meta), path(input)
+
+    output:
+    tuple val(meta), path("*.txt"), emit: txt
+    path "versions.yml"           , emit: versions
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    def args = task.ext.args ?: ''
+    def prefix = task.ext.prefix ?: "${meta.id}"
+    // WARN: Version information not provided by tool on CLI.
+    // Please update version string below when bumping container versions.
+    def VERSION = '2.7'
+    """
+    combine_bracken_outputs.py \\
+        $args \\
+        --files ${input} \\
+        -o ${prefix}.txt
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        combine_bracken_output: ${VERSION}
+    END_VERSIONS
+    """
+}
41  modules/nf-core/bracken/combinebrackenoutputs/meta.yml  generated  Normal file
@@ -0,0 +1,41 @@
+name: "bracken_combinebrackenoutputs"
+description: Combine output of metagenomic samples analyzed by bracken.
+keywords:
+  - sort
+tools:
+  - "bracken":
+      description: Bracken (Bayesian Reestimation of Abundance with KrakEN) is a highly accurate statistical method that computes the abundance of species in DNA sequences from a metagenomics sample.
+      homepage: https://ccb.jhu.edu/software/bracken/
+      documentation: https://ccb.jhu.edu/software/bracken/index.shtml?t=manual
+      tool_dev_url: https://github.com/jenniferlu717/Bracken
+      doi: "10.7717/peerj-cs.104"
+      licence: ["GPL v3"]
+
+input:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
+  - input:
+      type: file
+      description: List of output files from bracken
+      pattern: "*"
+
+output:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
+  - versions:
+      type: file
+      description: File containing software versions
+      pattern: "versions.yml"
+  - txt:
+      type: file
+      description: Combined output in table format
+      pattern: "*.txt"
+
+authors:
+  - "@jfy133"
38  modules/nf-core/eido/convert/main.nf  generated
@@ -1,38 +0,0 @@
-process EIDO_CONVERT {
-    tag "$samplesheet"
-    label 'process_single'
-
-    conda (params.enable_conda ? "conda-forge::eido=0.1.9" : null)
-    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://containers.biocontainers.pro/s3/SingImgsRepo/eido/0.1.9_cv1/eido_0.1.9_cv1.sif' :
-        'biocontainers/eido:0.1.9_cv1' }"
-
-    input:
-    path samplesheet
-    val format
-    path pep_input_base_dir
-
-    output:
-    path "versions.yml"           , emit: versions
-    path "${prefix}.${format}"    , emit: samplesheet_converted
-
-    when:
-    task.ext.when == null || task.ext.when
-
-    script:
-    def args = task.ext.args ?: ''
-    prefix = task.ext.prefix ?: "samplesheet_converted"
-    """
-    eido \\
-        convert \\
-        -f $format \\
-        $samplesheet \\
-        $args \\
-        -p samples=${prefix}.${format}
-
-    cat <<-END_VERSIONS > versions.yml
-    "${task.process}":
-        eido: \$(echo \$(eido --version 2>&1) | sed 's/^.*eido //;s/ .*//' ))
-    END_VERSIONS
-    """
-}
39  modules/nf-core/eido/convert/meta.yml  generated
@@ -1,39 +0,0 @@
-name: "eido_convert"
-description: Convert any PEP project or Nextflow samplesheet to any format
-keywords:
-  - eido
-  - convert
-  - PEP
-  - format
-  - samplesheet
-tools:
-  - "eido":
-      description: "Convert any PEP project or Nextflow samplesheet to any format"
-      homepage: "http://eido.databio.org/en/latest/"
-      documentation: "http://eido.databio.org/en/latest/"
-      doi: "10.1093/gigascience/giab077"
-      licence: "BSD-2-Clause"
-
-input:
-  - samplesheet:
-      type: file
-      description: Nextflow samplesheet or PEP project
-      pattern: "*.{yaml,yml,csv}"
-  - format:
-      type: value
-      description: Extension of an output file
-  - pep_input_base_dir:
-      type: file
-      description: Optional path to the directory where files specified in a PEP config file are stored. Any paths specified in the config will need to be relative to this base directory.
-
-output:
-  - versions:
-      type: file
-      description: File containing software versions
-      pattern: "versions.yml"
-  - samplesheet_converted:
-      type: file
-      description: PEP project or samplesheet converted to csv file
-
-authors:
-  - "@rafalstepien"
33  modules/nf-core/eido/validate/main.nf  generated
@@ -1,33 +0,0 @@
-process EIDO_VALIDATE {
-    tag "$samplesheet"
-    label 'process_single'
-
-    conda (params.enable_conda ? "conda-forge::eido=0.1.9" : null)
-    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://containers.biocontainers.pro/s3/SingImgsRepo/eido/0.1.9_cv2/eido_0.1.9_cv2.sif' :
-        'biocontainers/eido:0.1.9_cv2' }"
-
-    input:
-    path samplesheet
-    path schema
-    path pep_input_base_dir
-
-    output:
-    path "versions.yml" , emit: versions
-    path "*.log"        , emit: log
-
-    when:
-    task.ext.when == null || task.ext.when
-
-    script:
-    def args = task.ext.args ?: ''
-    def prefix = task.ext.prefix ?: "validation"
-    """
-    eido validate $args $samplesheet -s $schema -e > ${prefix}.log
-
-    cat <<-END_VERSIONS > versions.yml
-    "${task.process}":
-        eido: \$(echo \$(eido --version 2>&1) | sed 's/^.*eido //;s/ .*//' ))
-    END_VERSIONS
-    """
-}
41  modules/nf-core/eido/validate/meta.yml  generated
@@ -1,41 +0,0 @@
-name: "eido_validate"
-description: Validate samplesheet or PEP config against a schema
-keywords:
-  - eido
-  - validate
-  - schema
-  - format
-  - pep
-tools:
-  - "validate":
-      description: "Validate samplesheet or PEP config against a schema."
-      homepage: "http://eido.databio.org/en/latest/"
-      documentation: "http://eido.databio.org/en/latest/"
-      doi: "10.1093/gigascience/giab077"
-      licence: "BSD-2-Clause"
-
-input:
-  - samplesheet:
-      type: file
-      description: Samplesheet or PEP file to be validated
-      pattern: "*.{yaml,yml,csv}"
-  - schema:
-      type: file
-      description: Schema that the samplesheet will be validated against
-      pattern: "*.{yaml,yml}"
-  - pep_input_base_dir:
-      type: file
-      description: Optional path to the directory where files specified in a PEP config file are stored. Any paths specified in the config will need to be relative to this base directory.
-
-output:
-  - versions:
-      type: file
-      description: File containing software versions
-      pattern: "versions.yml"
-  - log:
-      type: file
-      description: File containing validation log.
-      pattern: "*.log"
-
-authors:
-  - "@rafalstepien"
16  modules/nf-core/falco/falco.diff  generated  Normal file
@@ -0,0 +1,16 @@
+Changes in module 'nf-core/falco'
+--- modules/nf-core/falco/main.nf
++++ modules/nf-core/falco/main.nf
+@@ -33,7 +33,9 @@
+         """
+     } else {
+         """
+-        falco $args --threads $task.cpus ${reads}
++        [ ! -f ${prefix}_1.fastq.gz ] && ln -s ${reads[0]} ${prefix}_1.fastq.gz
++        [ ! -f ${prefix}_2.fastq.gz ] && ln -s ${reads[1]} ${prefix}_2.fastq.gz
++        falco $args --threads $task.cpus ${prefix}_1.fastq.gz ${prefix}_2.fastq.gz
+
+         cat <<-END_VERSIONS > versions.yml
+         "${task.process}":
+
+************************************************************
4  modules/nf-core/falco/main.nf  generated
@@ -33,7 +33,9 @@ process FALCO {
         """
     } else {
         """
-        falco $args --threads $task.cpus ${reads}
+        [ ! -f ${prefix}_1.fastq.gz ] && ln -s ${reads[0]} ${prefix}_1.fastq.gz
+        [ ! -f ${prefix}_2.fastq.gz ] && ln -s ${reads[1]} ${prefix}_2.fastq.gz
+        falco $args --threads $task.cpus ${prefix}_1.fastq.gz ${prefix}_2.fastq.gz
 
         cat <<-END_VERSIONS > versions.yml
         "${task.process}":
8  modules/nf-core/motus/merge/main.nf  generated
@@ -1,13 +1,11 @@
-VERSION = '3.0.1'
-
 process MOTUS_MERGE {
     tag "$meta.id"
     label 'process_single'
 
-    conda (params.enable_conda ? "bioconda::motus=3.0.1" : null)
+    conda (params.enable_conda ? "bioconda::motus=3.0.3" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/motus:3.0.1--pyhdfd78af_0':
-        'quay.io/biocontainers/motus:3.0.1--pyhdfd78af_0' }"
+        'https://depot.galaxyproject.org/singularity/motus:3.0.3--pyhdfd78af_0':
+        'quay.io/biocontainers/motus:3.0.3--pyhdfd78af_0' }"
 
     input:
     tuple val(meta), path(input)
2  modules/nf-core/motus/merge/meta.yml  generated
@@ -14,7 +14,7 @@ tools:
     homepage: "https://motu-tool.org/"
     documentation: "https://github.com/motu-tool/mOTUs/wiki"
     tool_dev_url: "https://github.com/motu-tool/mOTUs"
-    doi: "10.1038/s41467-019-08844-4"
+    doi: "10.1186/s40168-022-01410-z"
     licence: "['GPL v3']"
 
 input:
6  modules/nf-core/motus/profile/main.nf  generated
@@ -2,10 +2,10 @@ process MOTUS_PROFILE {
     tag "$meta.id"
     label 'process_medium'
 
-    conda (params.enable_conda ? "bioconda::motus=3.0.1" : null)
+    conda (params.enable_conda ? "bioconda::motus=3.0.3" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/motus:3.0.1--pyhdfd78af_0':
-        'quay.io/biocontainers/motus:3.0.1--pyhdfd78af_0' }"
+        'https://depot.galaxyproject.org/singularity/motus:3.0.3--pyhdfd78af_0':
+        'quay.io/biocontainers/motus:3.0.3--pyhdfd78af_0' }"
 
     input:
     tuple val(meta), path(reads)
2  modules/nf-core/motus/profile/meta.yml  generated
@@ -11,7 +11,7 @@ tools:
      homepage: "https://motu-tool.org/"
      documentation: "https://github.com/motu-tool/mOTUs/wiki"
      tool_dev_url: "https://github.com/motu-tool/mOTUs"
-     doi: "10.1038/s41467-019-08844-4"
+     doi: "10.1186/s40168-022-01410-z"
      licence: "['GPL v3']"

 input:
@@ -252,7 +252,6 @@ profiles {
     test_nothing { includeConfig 'conf/test_nothing.config' }
     test_motus { includeConfig 'conf/test_motus.config' }
     test_krakenuniq { includeConfig 'conf/test_krakenuniq.config' }
-    test_pep { includeConfig 'conf/test_pep.config' }
 }

@@ -2,7 +2,6 @@
 // Check input samplesheet and get read channels
 //

-include { DATABASE_CHECK } from '../../modules/local/database_check'
 include { UNTAR } from '../../modules/nf-core/untar/main'

 workflow DB_CHECK {
@@ -10,15 +9,27 @@ workflow DB_CHECK {
     dbsheet // file: /path/to/dbsheet.csv

     main:
+    ch_versions = Channel.empty()

-    // TODO: make database sheet check
-    // Checks:
-    // 1) no duplicates,
-    // 2) args do not have quotes, e.g. just `,,` and NOT `,"",`
-    parsed_samplesheet = DATABASE_CHECK ( dbsheet )
-        .csv
+    // special check to check _between_ rows, for which we must group rows together
+    // note: this will run in parallel to within-row validity, but we can assume this will run faster thus will fail first
+    Channel.fromPath(dbsheet)
+        .splitCsv ( header:true, sep:',' )
+        .map { [it.tool, it.db_name] }
+        .groupTuple()
+        .map {
+            tool, db_name ->
+                def unique_names = db_name.unique(false)
+                if ( unique_names.size() < db_name.size() ) exit 1, "[nf-core/taxprofiler] ERROR: Each database for a tool must have a unique name, duplicate detected. Tool: ${tool}, Database name: ${unique_names}"
+        }
+
+    // normal checks for within-row validity, so can be moved to separate functions
+    parsed_samplesheet = Channel.fromPath(dbsheet)
         .splitCsv ( header:true, sep:',' )
-        .map { create_db_channels(it) }
+        .map {
+            validate_db_rows(it)
+            create_db_channels(it)
+        }

     ch_dbs_for_untar = parsed_samplesheet
         .branch {
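
Note (editorial, not part of the diff): the between-rows check above boils down to a size comparison after de-duplication. A minimal plain-Groovy sketch of that logic, using a made-up `db_name` list for a single tool:

```groovy
// Illustrative only: mirrors the duplicate-name check added in the hunk above.
def db_name = ['db1', 'db2', 'db1']       // assumed db_name values grouped for one tool
def unique_names = db_name.unique(false)  // unique(false) returns a copy and leaves db_name unchanged
assert unique_names == ['db1', 'db2']
assert unique_names.size() < db_name.size()   // true here, so DB_CHECK would exit with the duplicate-name error
```
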
@@ -29,12 +40,32 @@ workflow DB_CHECK {
     // TODO Filter to only run UNTAR on DBs of tools actually using?
     // TODO make optional whether to save
     UNTAR ( ch_dbs_for_untar.untar )
+    ch_versions = ch_versions.mix(UNTAR.out.versions.first())

     ch_final_dbs = ch_dbs_for_untar.skip.mix( UNTAR.out.untar )

     emit:
     dbs = ch_final_dbs // channel: [ val(meta), [ db ] ]
-    versions = DATABASE_CHECK.out.versions.mix(UNTAR.out.versions.first()) // channel: [ versions.yml ]
+    versions = ch_versions // channel: [ versions.yml ]
+}
+
+def validate_db_rows(LinkedHashMap row){
+
+    // check minimum number of columns
+    if (row.size() < 4) exit 1, "[nf-core/taxprofiler] ERROR: Invalid database input sheet - malformed row (e.g. missing column). See documentation for more information. Error in: ${row}"
+
+    // all columns there
+    def expected_headers = ['tool', 'db_name', 'db_params', 'db_path']
+    if ( !row.keySet().containsAll(expected_headers) ) exit 1, "[nf-core/taxprofiler] ERROR: Invalid database input sheet - malformed column names. Please check input CSV. Column names should be: ${expected_headers.join(", ")}"
+
+    // valid tools specified // TODO: finish this list
+    def expected_tools = [ "bracken", "centrifuge", "diamond", "kaiju", "kraken2", "krakenuniq", "malt", "metaphlan3", "motus" ]
+    if ( !expected_tools.contains(row.tool) ) exit 1, "[nf-core/taxprofiler] ERROR: Invalid tool name. Please see documentation for all supported profilers. Error in: ${row}"
+
+    // detect quotes in params
+    if ( row.db_params.contains('"') ) exit 1, "[nf-core/taxprofiler] ERROR: Invalid database db_params entry. No quotes allowed. Error in: ${row}"
+    if ( row.db_params.contains("'") ) exit 1, "[nf-core/taxprofiler] ERROR: Invalid database db_params entry. No quotes allowed. Error in: ${row}"
+
 }

 def create_db_channels(LinkedHashMap row) {
@@ -45,9 +76,11 @@ def create_db_channels(LinkedHashMap row) {

     def array = []
     if (!file(row.db_path, type: 'dir').exists()) {
-        exit 1, "ERROR: Please check input samplesheet -> database could not be found!\n${row.db_path}"
+        exit 1, "ERROR: Please check input samplesheet -> database path could not be found!\n${row.db_path}"
     }
     array = [ meta, file(row.db_path) ]

     return array
 }
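
Note (editorial, not part of the diff): for orientation, a `--databases` sheet that satisfies the new `validate_db_rows` checks (the four expected columns, a supported tool name, no quotes in `db_params`) could look like the following; database names and paths are purely illustrative:

```csv
tool,db_name,db_params,db_path
kraken2,k2_standard,--quick,/path/to/databases/kraken2/k2_standard
metaphlan3,mpa_db,,/path/to/databases/metaphlan3
```
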
@@ -2,62 +2,36 @@
 // Check input samplesheet and get read channels
 //

-include { EIDO_VALIDATE } from '../../modules/nf-core/eido/validate/main'
-include { EIDO_CONVERT } from '../../modules/nf-core/eido/convert/main'
+include { SAMPLESHEET_CHECK } from '../../modules/local/samplesheet_check'

 workflow INPUT_CHECK {
     take:
-    samplesheet_or_pep_config // file: /path/to/samplesheet.csv or /path/to/pep/config.yaml
-    pep_input_base_dir
+    samplesheet // file: /path/to/samplesheet.csv

     main:
-    ch_versions = Channel.empty()
-
-    EIDO_VALIDATE ( samplesheet_or_pep_config, file("$projectDir/assets/samplesheet_schema.yaml"), pep_input_base_dir )
-    ch_versions = ch_versions.mix(EIDO_VALIDATE.out.versions)
-
-    EIDO_CONVERT ( samplesheet_or_pep_config, "csv", pep_input_base_dir )
-    ch_versions = ch_versions.mix(EIDO_CONVERT.out.versions)
-
-    ch_parsed_samplesheet = EIDO_CONVERT.out.samplesheet_converted
+    parsed_samplesheet = SAMPLESHEET_CHECK ( samplesheet )
+        .csv
         .splitCsv ( header:true, sep:',' )
-        .map { check_missing_and_singleend_autodetect(it) }
         .branch {
             fasta: it['fasta'] != ''
             nanopore: it['instrument_platform'] == 'OXFORD_NANOPORE'
             fastq: true
         }

-    ch_parsed_samplesheet.fastq
+    fastq = parsed_samplesheet.fastq
         .map { create_fastq_channel(it) }
-        .set { fastq }

-    ch_parsed_samplesheet.nanopore
+    nanopore = parsed_samplesheet.nanopore
         .map { create_fastq_channel(it) }
-        .set { nanopore }

-    ch_parsed_samplesheet.fasta
+    fasta = parsed_samplesheet.fasta
         .map { create_fasta_channel(it) }
-        .set { fasta }

     emit:
     fastq = fastq ?: [] // channel: [ val(meta), [ reads ] ]
     nanopore = nanopore ?: [] // channel: [ val(meta), [ reads ] ]
     fasta = fasta ?: [] // channel: [ val(meta), fasta ]
-    versions = ch_versions // channel: [ versions.yml ]
-}
-
-// Function to validate input sheet and auto-detect R1/R2
-def check_missing_and_singleend_autodetect(LinkedHashMap row) {
-
-    // Checks not supported by EIDO(?)
-    if ( ( row['fastq_1'] != "" || row['fastq_2'] != "" ) && row['fasta'] != "" ) { exit 1, "[nf-core/taxprofiler] ERROR: FastQ and FastA files cannot be specified together in the same library. Check input samplesheet! Check sample: ${row['sample']}" }
-    if ( row['fastq_1'] == "" && row['fastq_2'] != "" ) { exit 1, "[nf-core/taxprofiler] ERROR: Input samplesheet has a missing fastq_1 when fastq_2 is specified. Check sample: ${row['sample']}" }
-
-    single_end = row['fastq_2'] == "" ? true : false
-    row['single_end'] = single_end
-
-    return row
+    versions = SAMPLESHEET_CHECK.out.versions // channel: [ versions.yml ]
 }

 // Function to get list of [ meta, [ fastq_1, fastq_2 ] ]
@@ -87,12 +61,11 @@ def create_fastq_channel(LinkedHashMap row) {
         if (!file(row.fastq_2).exists()) {
             exit 1, "ERROR: Please check input samplesheet -> Read 2 FastQ file does not exist!\n${row.fastq_2}"
         }
         fastq_meta = [ meta, [ file(row.fastq_1), file(row.fastq_2) ] ]
     }

     }
     return fastq_meta
-
 }// Function to get list of [ meta, fasta ]
 def create_fasta_channel(LinkedHashMap row) {
     def meta = [:]
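
Note (editorial, not part of the diff): the `.branch` in the INPUT_CHECK hunk above routes each parsed samplesheet row by its `fasta` and `instrument_platform` fields. Rows shaped like the following would land in the fastq, nanopore and fasta branches respectively; only the columns referenced in this subworkflow are shown, all file names are made up, and the real sheet has further required columns (see the usage documentation):

```csv
sample,instrument_platform,fastq_1,fastq_2,fasta
sample1,ILLUMINA,sample1_R1.fastq.gz,sample1_R2.fastq.gz,
sample2,OXFORD_NANOPORE,sample2.fastq.gz,,
sample3,ILLUMINA,,,sample3.fasta
```
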
@@ -41,14 +41,14 @@ workflow PROFILING {
         }
         .combine(databases)
         .branch {
-            malt: it[2]['tool'] == 'malt'
-            kraken2: it[2]['tool'] == 'kraken2' || it[2]['tool'] == 'bracken' // to reuse the kraken module to produce the input data for bracken
-            metaphlan3: it[2]['tool'] == 'metaphlan3'
             centrifuge: it[2]['tool'] == 'centrifuge'
-            kaiju: it[2]['tool'] == 'kaiju'
             diamond: it[2]['tool'] == 'diamond'
-            motus: it[2]['tool'] == 'motus'
+            kaiju: it[2]['tool'] == 'kaiju'
+            kraken2: it[2]['tool'] == 'kraken2' || it[2]['tool'] == 'bracken' // to reuse the kraken module to produce the input data for bracken
             krakenuniq: it[2]['tool'] == 'krakenuniq'
+            malt: it[2]['tool'] == 'malt'
+            metaphlan3: it[2]['tool'] == 'metaphlan3'
+            motus: it[2]['tool'] == 'motus'
             unknown: true
         }

@@ -2,6 +2,7 @@
 // Standardise output files e.g. aggregation
 //

+include { BRACKEN_COMBINEBRACKENOUTPUTS } from '../../modules/nf-core/bracken/combinebrackenoutputs/main'
 include { KAIJU_KAIJU2TABLE } from '../../modules/nf-core/kaiju/kaiju2table/main'
 include { KRAKENTOOLS_COMBINEKREPORTS as KRAKENTOOLS_COMBINEKREPORTS_KRAKEN } from '../../modules/nf-core/krakentools/combinekreports/main'
 include { KRAKENTOOLS_COMBINEKREPORTS as KRAKENTOOLS_COMBINEKREPORTS_CENTRIFUGE } from '../../modules/nf-core/krakentools/combinekreports/main'
@@ -25,10 +26,11 @@ workflow STANDARDISATION_PROFILES {
     */
     ch_input_profiles = profiles
         .branch {
-            motus: it[0]['tool'] == 'motus'
-            kraken2: it[0]['tool'] == 'kraken2'
+            bracken: it[0]['tool'] == 'bracken'
             centrifuge: it[0]['tool'] == 'centrifuge'
+            kraken2: it[0]['tool'] == 'kraken2'
             metaphlan3: it[0]['tool'] == 'metaphlan3'
+            motus: it[0]['tool'] == 'motus'
             unknown: true
         }

@@ -49,7 +51,18 @@ workflow STANDARDISATION_PROFILES {
         Standardise and aggregate
     */

-    // CENTRIFUGE
+    // Bracken
+
+    ch_profiles_for_bracken = ch_input_profiles.bracken
+        .map { [it[0]['db_name'], it[1]] }
+        .groupTuple()
+        .map {
+            [[id:it[0]], it[1]]
+        }
+
+    BRACKEN_COMBINEBRACKENOUTPUTS ( ch_profiles_for_bracken )
+
+    // CENTRIFUGE

     // Collect and replace id for db_name for prefix
     // Have to sort by size to ensure first file actually has hits otherwise
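
Note (editorial, not part of the diff): the new Bracken aggregation channel groups per-sample profiles by database name and re-labels each group with an `id` before BRACKEN_COMBINEBRACKENOUTPUTS. A plain-Groovy sketch of the same reshaping on two made-up profile entries, with `groupBy` standing in for Nextflow's `groupTuple()`:

```groovy
// Illustrative only: list reshaping equivalent to the channel operations above.
def profiles = [
    [ [id: 'sample1', tool: 'bracken', db_name: 'bracken_db'], 'sample1.bracken.tsv' ],
    [ [id: 'sample2', tool: 'bracken', db_name: 'bracken_db'], 'sample2.bracken.tsv' ],
]

def grouped = profiles
    .collect { [ it[0]['db_name'], it[1] ] }                              // .map { [it[0]['db_name'], it[1]] }
    .groupBy { it[0] }                                                    // stand-in for .groupTuple()
    .collect { db, entries -> [ [id: db], entries.collect { it[1] } ] }   // final .map { [[id:it[0]], it[1]] }

assert grouped == [ [ [id: 'bracken_db'], ['sample1.bracken.tsv', 'sample2.bracken.tsv'] ] ]
```
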
@@ -20,12 +20,11 @@ for (param in checkPathParamList) { if (param) { file(param, checkIfExists: true
 // Check mandatory parameters
 if ( params.input ) {
     ch_input = file(params.input, checkIfExists: true)
-    pep_input_base_dir = file(params.input).extension.matches("yaml|yml") ? file(file(params.input).getParent(), checkIfExists: true) : []
 } else {
-    exit 1, "Input samplesheet, or PEP config and base directory not specified"
+    exit 1, "Input samplesheet not specified"
 }

-if (params.databases) { ch_databases = file(params.databases) } else { exit 1, 'Input database sheet not specified!' }
+if (params.databases) { ch_databases = file(params.databases, checkIfExists: true) } else { exit 1, 'Input database sheet not specified!' }

 if (params.shortread_qc_mergepairs && params.run_malt ) log.warn "[nf-core/taxprofiler] MALT does not accept uncollapsed paired-reads. Pairs will be profiled as separate files."
 if (params.shortread_qc_includeunmerged && !params.shortread_qc_mergepairs) exit 1, "ERROR: [nf-core/taxprofiler] cannot include unmerged reads when merging is not turned on. Please specify --shortread_qc_mergepairs"
@@ -115,7 +114,7 @@ workflow TAXPROFILER {
         SUBWORKFLOW: Read in samplesheet, validate and stage input files
     */
     INPUT_CHECK (
-        ch_input, pep_input_base_dir
+        ch_input
     )
     ch_versions = ch_versions.mix(INPUT_CHECK.out.versions)
