Mirror of https://github.com/MillironX/taxprofiler.git (synced 2024-11-25 06:09:54 +00:00)

Merge pull request #187 from nf-core/revert-pep

Revert PEP validation back to python samplesheet check

Commit 25bb4304e8: 18 changed files with 273 additions and 332 deletions
README.md

@@ -77,8 +77,6 @@ On release, automated continuous integration tests run the pipeline on a full-si

nextflow run nf-core/taxprofiler --input samplesheet.csv --databases database.csv --outdir <OUTDIR> --run_<TOOL1> --run_<TOOL2> -profile <docker/singularity/podman/shifter/charliecloud/conda/institute>
```

-Note the pipeline supports both CSV and PEP input sample sheets. Find out more [here](http://pep.databio.org/en/2.1.0/specification/).
-
## Documentation

The nf-core/taxprofiler pipeline comes with documentation about the pipeline [usage](https://nf-co.re/taxprofiler/usage), [parameters](https://nf-co.re/taxprofiler/parameters) and [output](https://nf-co.re/taxprofiler/output).
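For concreteness, `--run_<TOOL1> --run_<TOOL2>` stands for profiler flags such as `--run_kraken2 --run_centrifuge`; any subset of the supported `--run_*` flags (e.g. `--run_kaiju`, `--run_malt`, `--run_diamond`, as seen in the test profile below) can be combined in a single invocation.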
assets/samplesheet_schema.yaml (removed file)

@@ -1,55 +0,0 @@

description: A schema for validation of samplesheet.csv for taxprofiler pipeline.
imports:
  - https://schema.databio.org/pep/2.1.0.yaml
properties:
  samples:
    type: array
    items:
      type: object
      properties:
        sample:
          type: string
          description: "Sample identifier."
          pattern: "^\\S*$"
        run_accession:
          type: string
          description: "Run accession number."
        instrument_platform:
          type: string
          description: "Name of the platform that sequenced the samples."
          enum:
            [
              "ABI_SOLID",
              "BGISEQ",
              "CAPILLARY",
              "COMPLETE_GENOMICS",
              "DNBSEQ",
              "HELICOS",
              "ILLUMINA",
              "ION_TORRENT",
              "LS454",
              "OXFORD_NANOPORE",
              "PACBIO_SMRT",
            ]
        fastq1:
          type: ["string", "null"]
          description: "Optional FASTQ file for read 1 of paired-end sequenced libraries."
          pattern: "^[\\S]+.(fq\\.gz|fastq\\.gz)$"
        fastq2:
          type: ["string", "null"]
          description: "Optional FASTQ file for read 2 of paired-end sequenced libraries."
          pattern: "^[\\S]+.(fq\\.gz|fastq\\.gz)$"
        fasta:
          type: ["string", "null"]
          description: "Optional FASTA file."
          pattern: "^[\\S]+.(fa\\.gz|fasta\\.gz)$"
      required:
        - sample
        - run_accession
        - instrument_platform
      files:
        - fastq1
        - fastq2
        - fasta
required:
  - samples
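The constraints in this schema are plain JSON Schema keywords layered on the PEP 2.1.0 base schema, so the per-row checks eido performed can be reproduced with the `jsonschema` package. A minimal sketch (assuming `pip install jsonschema`; the abridged inline schema fragment and the example row are illustrative, not taken from this diff):

```python
# Minimal sketch: validate one samplesheet row against the per-sample
# constraints from the (now removed) PEP schema, using plain jsonschema.
from jsonschema import ValidationError, validate

SAMPLE_SCHEMA = {
    "type": "object",
    "properties": {
        "sample": {"type": "string", "pattern": r"^\S*$"},
        "run_accession": {"type": "string"},
        "instrument_platform": {
            "type": "string",
            "enum": ["ILLUMINA", "OXFORD_NANOPORE", "PACBIO_SMRT"],  # abridged list
        },
        "fastq1": {"type": ["string", "null"], "pattern": r"^[\S]+.(fq\.gz|fastq\.gz)$"},
    },
    "required": ["sample", "run_accession", "instrument_platform"],
}

row = {
    "sample": "2612",
    "run_accession": "ERR5766176",
    "instrument_platform": "ILLUMINA",
    "fastq1": "ERX5474932_ERR5766176_1.fastq.gz",
}

try:
    validate(instance=row, schema=SAMPLE_SCHEMA)
    print("row passes schema validation")
except ValidationError as err:
    print(f"row fails validation: {err.message}")
```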
bin/check_samplesheet.py (new executable file, 233 lines)

@@ -0,0 +1,233 @@
#!/usr/bin/env python

import os
import sys
import errno
import argparse


def parse_args(args=None):
    Description = "Reformat nf-core/taxprofiler samplesheet file and check its contents."
    Epilog = "Example usage: python check_samplesheet.py <FILE_IN> <FILE_OUT>"

    parser = argparse.ArgumentParser(description=Description, epilog=Epilog)
    parser.add_argument("FILE_IN", help="Input samplesheet file.")
    parser.add_argument("FILE_OUT", help="Output file.")
    return parser.parse_args(args)


def make_dir(path):
    if len(path) > 0:
        try:
            os.makedirs(path)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise exception


def print_error(error, context="Line", context_str=""):
    error_str = "ERROR: Please check samplesheet -> {}".format(error)
    if context != "" and context_str != "":
        error_str = "ERROR: Please check samplesheet -> {}\n{}: '{}'".format(
            error, context.strip(), context_str.strip()
        )
    print(error_str)
    sys.exit(1)


def check_samplesheet(file_in, file_out):
    """
    This function checks that the samplesheet follows the following structure:

    sample,run_accession,instrument_platform,fastq_1,fastq_2,fasta
    2611,ERR5766174,ILLUMINA,,,ERX5474930_ERR5766174_1.fa.gz
    2612,ERR5766176,ILLUMINA,ERX5474932_ERR5766176_1.fastq.gz,ERX5474932_ERR5766176_2.fastq.gz,
    2612,ERR5766174,ILLUMINA,ERX5474936_ERR5766180_1.fastq.gz,,
    2613,ERR5766181,ILLUMINA,ERX5474937_ERR5766181_1.fastq.gz,ERX5474937_ERR5766181_2.fastq.gz,
    """

    FQ_EXTENSIONS = (".fq.gz", ".fastq.gz")
    FA_EXTENSIONS = (
        ".fa.gz",
        ".fasta.gz",
        ".fna.gz",
        ".fas.gz",
    )
    INSTRUMENT_PLATFORMS = [
        "ABI_SOLID",
        "BGISEQ",
        "CAPILLARY",
        "COMPLETE_GENOMICS",
        "DNBSEQ",
        "HELICOS",
        "ILLUMINA",
        "ION_TORRENT",
        "LS454",
        "OXFORD_NANOPORE",
        "PACBIO_SMRT",
    ]

    sample_mapping_dict = {}
    with open(file_in, "r") as fin:

        ## Check header
        MIN_COLS = 4
        HEADER = [
            "sample",
            "run_accession",
            "instrument_platform",
            "fastq_1",
            "fastq_2",
            "fasta",
        ]
        header = [x.strip('"') for x in fin.readline().strip().split(",")]

        ## Check for missing mandatory columns
        missing_columns = list(set(HEADER) - set(header))
        if len(missing_columns) > 0:
            print(
                "ERROR: Missing required column header -> {}. Note some columns can otherwise be empty. See pipeline documentation (https://nf-co.re/taxprofiler/usage).".format(
                    ",".join(missing_columns)
                )
            )
            sys.exit(1)

        ## Find locations of mandatory columns
        header_locs = {}
        for i in HEADER:
            header_locs[i] = header.index(i)

        ## Check sample entries
        for line in fin:

            ## Pull out only relevant columns for downstream checking
            line_parsed = [x.strip().strip('"') for x in line.strip().split(",")]

            # Check valid number of columns per row
            if len(line_parsed) < len(HEADER):
                print_error(
                    "Invalid number of columns (minimum = {})!".format(len(HEADER)),
                    "Line",
                    line,
                )
            num_cols = len([x for x in line_parsed if x])
            if num_cols < MIN_COLS:
                print_error(
                    "Invalid number of populated columns (minimum = {})!".format(MIN_COLS),
                    "Line",
                    line,
                )

            lspl = [line_parsed[i] for i in header_locs.values()]

            ## Check sample name entries
            (
                sample,
                run_accession,
                instrument_platform,
                fastq_1,
                fastq_2,
                fasta,
            ) = lspl[: len(HEADER)]
            sample = sample.replace(" ", "_")
            if not sample:
                print_error("Sample entry has not been specified!", "Line", line)

            ## Check FastQ file extension
            for fastq in [fastq_1, fastq_2]:
                if fastq:
                    if fastq.find(" ") != -1:
                        print_error("FastQ file contains spaces!", "Line", line)
                    if not fastq.endswith(FQ_EXTENSIONS):
                        print_error(
                            f"FastQ file does not have extension {' or '.join(list(FQ_EXTENSIONS))}!",
                            "Line",
                            line,
                        )
            if fasta:
                if fasta.find(" ") != -1:
                    print_error("FastA file contains spaces!", "Line", line)
                if not fasta.endswith(FA_EXTENSIONS):
                    print_error(
                        f"FastA file does not have extension {' or '.join(list(FA_EXTENSIONS))}!",
                        "Line",
                        line,
                    )
            sample_info = []

            # Check run_accession
            if not run_accession:
                print_error("Run accession has not been specified!", "Line", line)
            else:
                sample_info.append(run_accession)

            # Check instrument_platform
            if not instrument_platform:
                print_error("Instrument platform has not been specified!", "Line", line)
            else:
                if instrument_platform not in INSTRUMENT_PLATFORMS:
                    print_error(
                        f"Instrument platform {instrument_platform} is not supported! "
                        f"List of supported platforms {', '.join(INSTRUMENT_PLATFORMS)}",
                        "Line",
                        line,
                    )
                sample_info.append(instrument_platform)

            ## Auto-detect paired-end/single-end
            if sample and fastq_1 and fastq_2:  ## Paired-end short reads
                sample_info.extend(["0", fastq_1, fastq_2, fasta])
            elif sample and fastq_1 and not fastq_2:  ## Single-end short/long fastq reads
                sample_info.extend(["1", fastq_1, fastq_2, fasta])
            elif sample and fasta and not fastq_1 and not fastq_2:  ## Single-end long reads
                sample_info.extend(["1", fastq_1, fastq_2, fasta])
            elif fasta and (fastq_1 or fastq_2):
                print_error(
                    "FastQ and FastA files cannot be specified together in the same library!",
                    "Line",
                    line,
                )
            else:
                print_error("Invalid combination of columns provided!", "Line", line)

            ## Create sample mapping dictionary = { sample: [ run_accession, instrument_platform, single_end, fastq_1, fastq_2, fasta ] }
            if sample not in sample_mapping_dict:
                sample_mapping_dict[sample] = [sample_info]
            else:
                if sample_info in sample_mapping_dict[sample]:
                    print_error("Samplesheet contains duplicate rows!", "Line", line)
                else:
                    sample_mapping_dict[sample].append(sample_info)

    ## Write validated samplesheet with appropriate columns
    HEADER_OUT = [
        "sample",
        "run_accession",
        "instrument_platform",
        "single_end",
        "fastq_1",
        "fastq_2",
        "fasta",
    ]
    if len(sample_mapping_dict) > 0:
        out_dir = os.path.dirname(file_out)
        make_dir(out_dir)
        with open(file_out, "w") as fout:
            fout.write(",".join(HEADER_OUT) + "\n")
            for sample in sorted(sample_mapping_dict.keys()):
                for idx, val in enumerate(sample_mapping_dict[sample]):
                    fout.write(f"{sample},{','.join(val)}\n")
    else:
        print_error("No entries to process!", "Samplesheet: {}".format(file_in))


def main(args=None):
    args = parse_args(args)
    check_samplesheet(args.FILE_IN, args.FILE_OUT)


if __name__ == "__main__":
    sys.exit(main())
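Tracing the script over the docstring example above (a worked illustration derived from the code, not output captured from a real run): the FASTA-only row is tagged single-ended, paired rows keep both FASTQ paths, and a `single_end` column is inserted after `instrument_platform`, so `samplesheet.valid.csv` would read:

```csv
sample,run_accession,instrument_platform,single_end,fastq_1,fastq_2,fasta
2611,ERR5766174,ILLUMINA,1,,,ERX5474930_ERR5766174_1.fa.gz
2612,ERR5766176,ILLUMINA,0,ERX5474932_ERR5766176_1.fastq.gz,ERX5474932_ERR5766176_2.fastq.gz,
2612,ERR5766174,ILLUMINA,1,ERX5474936_ERR5766180_1.fastq.gz,,
2613,ERR5766181,ILLUMINA,0,ERX5474937_ERR5766181_1.fastq.gz,ERX5474937_ERR5766181_2.fastq.gz,
```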
conf/modules.config

@@ -552,12 +552,4 @@ process {

            saveAs: { filename -> filename.equals('versions.yml') ? null : filename }
        ]
    }
-
-    withName: 'EIDO_VALIDATE' {
-        ext.args = '--st-index sample'
-    }
-
-    withName: 'EIDO_CONVERT' {
-        ext.args = '--st-index sample'
-    }
}
@@ -60,10 +60,4 @@ process {

    withName: MEGAN_RMA2INFO_KRONA {
        maxForks = 1
    }
-    withName: 'EIDO_VALIDATE' {
-        ext.args = '--st-index sample'
-    }
-    withName: 'EIDO_CONVERT' {
-        ext.args = '--st-index sample'
-    }
}
@@ -63,10 +63,4 @@ process {

    withName: MEGAN_RMA2INFO_KRONA {
        maxForks = 1
    }
-    withName: 'EIDO_VALIDATE' {
-        ext.args = '--st-index sample'
-    }
-    withName: 'EIDO_CONVERT' {
-        ext.args = '--st-index sample'
-    }
}
conf/test_pep.config (removed file)

@@ -1,45 +0,0 @@

params {
    config_profile_name        = 'Test PEP profile'
    config_profile_description = 'Minimal test dataset to check pipeline function with PEP file as an input.'

    // Limit resources so that this can run on GitHub Actions
    max_cpus   = 2
    max_memory = '6.GB'
    max_time   = '6.h'

    // Input data
    input                              = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/delete_me/pep/test_pep_format_files/config.yaml'
    databases                          = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/database.csv'
    perform_shortread_qc               = true
    perform_longread_qc                = true
    perform_shortread_complexityfilter = true
    perform_shortread_hostremoval      = true
    perform_longread_hostremoval       = true
    perform_runmerging                 = true
    hostremoval_reference              = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/homo_sapiens/genome/genome.fasta'
    run_kaiju                          = true
    run_kraken2                        = true
    run_bracken                        = true
    run_malt                           = true
    run_metaphlan3                     = true
    run_centrifuge                     = true
    run_diamond                        = true
    run_motus                          = false
    run_krona                          = true
    krona_taxonomy_directory           = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/sarscov2/metagenome/krona_taxonomy.tab'
    malt_save_reads                    = true
    kraken2_save_reads                 = true
    centrifuge_save_reads              = true
    diamond_save_reads                 = true
}

process {
    withName: MALT_RUN {
        maxForks = 1
        ext.args = { "-m ${params.malt_mode} -J-Xmx12G" }
    }
    withName: MEGAN_RMA2INFO {
        maxForks = 1
    }
}
docs/usage.md

@@ -12,7 +12,7 @@

nf-core/taxprofiler can accept as input raw or preprocessed single- or paired-end short-read (e.g. Illumina) FASTQ files, long-read FASTQ files (e.g. Oxford Nanopore), or FASTA sequences (available for a subset of profilers).

-> ⚠️ Input FASTQ files _must_ be gzipped, while FASTA files may optionally be uncompressed (although this is not recommended)
+> ⚠️ Input FASTQ and FASTA files _must_ be gzipped

You will need to create a samplesheet with information about the samples you would like to analyse before running the pipeline. Use this parameter to specify its location. It has to be a comma-separated file with 6 columns, and a header row as shown in the examples below. Furthermore, nf-core/taxprofiler also requires a second comma-separated file of 3 columns with a header row as in the examples below.

@@ -22,10 +22,6 @@ This samplesheet is then specified on the command line as follows:

--input '[path to samplesheet file]' --databases '[path to database sheet file]'
```

-Note the pipeline supports both CSV and PEP input sample sheets. Find out more [here](http://pep.databio.org/en/2.1.0/specification/).
-When using PEP as an input, the `samplesheet.csv` must be placed in the same folder as the `config.yaml` file, and the path to `samplesheet.csv` within the config must be absolute.

### Multiple runs of the same sample

The `sample` identifiers have to be the same when you have re-sequenced the same sample more than once, e.g. to increase sequencing depth. When `--perform_runmerging` is supplied, the pipeline will concatenate the FASTQ files from different runs of the same sample before performing profiling. Below is an example for the same sample sequenced across 3 lanes:
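The example sheet itself falls outside this hunk; an illustrative sheet of the kind described (file names hypothetical) would be:

```csv
sample,run_accession,instrument_platform,fastq_1,fastq_2,fasta
2612,run1,ILLUMINA,2612_run1_R1.fq.gz,2612_run1_R2.fq.gz,
2612,run2,ILLUMINA,2612_run2_R1.fq.gz,2612_run2_R2.fq.gz,
2612,run3,ILLUMINA,2612_run3_R1.fq.gz,2612_run3_R2.fq.gz,
```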
@@ -312,9 +308,6 @@ If `-profile` is not specified, the pipeline will run locally and expect all sof

- `test`
  - A profile with a complete configuration for automated testing
  - Includes links to test data so needs no other parameters
-- `test_pep`
-  - A profile with a complete configuration for running a pipeline with PEP as input
-  - Includes links to test data so needs no other parameters
- `docker`
  - A generic configuration profile to be used with [Docker](https://docker.com/)
- `singularity`
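As elsewhere in nf-core, profiles can be chained, so a typical smoke test of this revert would look something like `nextflow run nf-core/taxprofiler -profile test,docker --outdir <OUTDIR>` (an illustrative invocation following nf-core convention, not a command quoted from this PR).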
lib/WorkflowMain.groovy

@@ -81,7 +81,7 @@ class WorkflowMain {

        // Check input has been provided
        if (!params.input) {
-            log.error "Please provide an input samplesheet or PEP to the pipeline e.g. '--input samplesheet.csv'"
+            log.error "Please provide an input samplesheet to the pipeline e.g. '--input samplesheet.csv'"
            System.exit(1)
        }
    }
modules.json

@@ -60,16 +60,6 @@

            "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905",
            "installed_by": ["modules"]
        },
-        "eido/convert": {
-            "branch": "master",
-            "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905",
-            "installed_by": ["modules"]
-        },
-        "eido/validate": {
-            "branch": "master",
-            "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905",
-            "installed_by": ["modules"]
-        },
        "falco": {
            "branch": "master",
            "git_sha": "fc959214036403ad83efe7a41d43d0606c445cda",
modules/local/samplesheet_check.nf (new file, 27 lines)

@@ -0,0 +1,27 @@

process SAMPLESHEET_CHECK {
    tag "$samplesheet"

    conda (params.enable_conda ? "conda-forge::python=3.8.3" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/python:3.8.3' :
        'quay.io/biocontainers/python:3.8.3' }"

    input:
    path samplesheet

    output:
    path '*.csv'       , emit: csv
    path "versions.yml", emit: versions

    script: // This script is bundled with the pipeline, in nf-core/taxprofiler/bin/
    """
    check_samplesheet.py \\
        $samplesheet \\
        samplesheet.valid.csv

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        python: \$(python --version | sed 's/Python //g')
    END_VERSIONS
    """
}
modules/nf-core/eido/convert/main.nf (removed generated file, 38 lines)

@@ -1,38 +0,0 @@

process EIDO_CONVERT {
    tag "$samplesheet"
    label 'process_single'

    conda (params.enable_conda ? "conda-forge::eido=0.1.9" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://containers.biocontainers.pro/s3/SingImgsRepo/eido/0.1.9_cv1/eido_0.1.9_cv1.sif' :
        'biocontainers/eido:0.1.9_cv1' }"

    input:
    path samplesheet
    val format
    path pep_input_base_dir

    output:
    path "versions.yml"       , emit: versions
    path "${prefix}.${format}", emit: samplesheet_converted

    when:
    task.ext.when == null || task.ext.when

    script:
    def args = task.ext.args ?: ''
    prefix = task.ext.prefix ?: "samplesheet_converted"
    """
    eido \\
        convert \\
        -f $format \\
        $samplesheet \\
        $args \\
        -p samples=${prefix}.${format}

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        eido: \$(echo \$(eido --version 2>&1) | sed 's/^.*eido //;s/ .*//')
    END_VERSIONS
    """
}
modules/nf-core/eido/convert/meta.yml (removed generated file, 39 lines)

@@ -1,39 +0,0 @@

name: "eido_convert"
description: Convert any PEP project or Nextflow samplesheet to any format
keywords:
  - eido
  - convert
  - PEP
  - format
  - samplesheet
tools:
  - "eido":
      description: "Convert any PEP project or Nextflow samplesheet to any format"
      homepage: "http://eido.databio.org/en/latest/"
      documentation: "http://eido.databio.org/en/latest/"
      doi: "10.1093/gigascience/giab077"
      licence: "BSD-2-Clause"

input:
  - samplesheet:
      type: file
      description: Nextflow samplesheet or PEP project
      pattern: "*.{yaml,yml,csv}"
  - format:
      type: value
      description: Extension of an output file
  - pep_input_base_dir:
      type: file
      description: Optional path to the directory where files specified in a PEP config file are stored. Any paths specified in the config will need to be relative to this base directory.

output:
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"
  - samplesheet_converted:
      type: file
      description: PEP project or samplesheet converted to csv file

authors:
  - "@rafalstepien"
modules/nf-core/eido/validate/main.nf (removed generated file, 33 lines)

@@ -1,33 +0,0 @@

process EIDO_VALIDATE {
    tag "$samplesheet"
    label 'process_single'

    conda (params.enable_conda ? "conda-forge::eido=0.1.9" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://containers.biocontainers.pro/s3/SingImgsRepo/eido/0.1.9_cv2/eido_0.1.9_cv2.sif' :
        'biocontainers/eido:0.1.9_cv2' }"

    input:
    path samplesheet
    path schema
    path pep_input_base_dir

    output:
    path "versions.yml", emit: versions
    path "*.log"       , emit: log

    when:
    task.ext.when == null || task.ext.when

    script:
    def args = task.ext.args ?: ''
    def prefix = task.ext.prefix ?: "validation"
    """
    eido validate $args $samplesheet -s $schema -e > ${prefix}.log

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        eido: \$(echo \$(eido --version 2>&1) | sed 's/^.*eido //;s/ .*//')
    END_VERSIONS
    """
}
modules/nf-core/eido/validate/meta.yml (removed generated file, 41 lines)

@@ -1,41 +0,0 @@

name: "eido_validate"
description: Validate samplesheet or PEP config against a schema
keywords:
  - eido
  - validate
  - schema
  - format
  - pep
tools:
  - "validate":
      description: "Validate samplesheet or PEP config against a schema."
      homepage: "http://eido.databio.org/en/latest/"
      documentation: "http://eido.databio.org/en/latest/"
      doi: "10.1093/gigascience/giab077"
      licence: "BSD-2-Clause"

input:
  - samplesheet:
      type: file
      description: Samplesheet or PEP file to be validated
      pattern: "*.{yaml,yml,csv}"
  - schema:
      type: file
      description: Schema that the samplesheet will be validated against
      pattern: "*.{yaml,yml}"
  - pep_input_base_dir:
      type: file
      description: Optional path to the directory where files specified in a PEP config file are stored. Any paths specified in the config will need to be relative to this base directory.

output:
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"
  - log:
      type: file
      description: File containing validation log.
      pattern: "*.log"

authors:
  - "@rafalstepien"
nextflow.config

@@ -252,7 +252,6 @@ profiles {

    test_nothing    { includeConfig 'conf/test_nothing.config' }
    test_motus      { includeConfig 'conf/test_motus.config' }
    test_krakenuniq { includeConfig 'conf/test_krakenuniq.config' }
-    test_pep        { includeConfig 'conf/test_pep.config' }
}
subworkflows/local/input_check.nf

@@ -2,62 +2,36 @@
// Check input samplesheet and get read channels
//

-include { EIDO_VALIDATE } from '../../modules/nf-core/eido/validate/main'
-include { EIDO_CONVERT  } from '../../modules/nf-core/eido/convert/main'
+include { SAMPLESHEET_CHECK } from '../../modules/local/samplesheet_check'

workflow INPUT_CHECK {
    take:
-    samplesheet_or_pep_config // file: /path/to/samplesheet.csv or /path/to/pep/config.yaml
-    pep_input_base_dir
+    samplesheet // file: /path/to/samplesheet.csv

    main:
-    ch_versions = Channel.empty()
-
-    EIDO_VALIDATE ( samplesheet_or_pep_config, file("$projectDir/assets/samplesheet_schema.yaml"), pep_input_base_dir )
-    ch_versions = ch_versions.mix(EIDO_VALIDATE.out.versions)
-
-    EIDO_CONVERT ( samplesheet_or_pep_config, "csv", pep_input_base_dir )
-    ch_versions = ch_versions.mix(EIDO_CONVERT.out.versions)
-
-    ch_parsed_samplesheet = EIDO_CONVERT.out.samplesheet_converted
+    parsed_samplesheet = SAMPLESHEET_CHECK ( samplesheet )
+        .csv
        .splitCsv ( header:true, sep:',' )
-        .map { check_missing_and_singleend_autodetect(it) }
        .branch {
            fasta: it['fasta'] != ''
            nanopore: it['instrument_platform'] == 'OXFORD_NANOPORE'
            fastq: true
        }

-    ch_parsed_samplesheet.fastq
+    fastq = parsed_samplesheet.fastq
        .map { create_fastq_channel(it) }
-        .set { fastq }

-    ch_parsed_samplesheet.nanopore
+    nanopore = parsed_samplesheet.nanopore
        .map { create_fastq_channel(it) }
-        .set { nanopore }

-    ch_parsed_samplesheet.fasta
+    fasta = parsed_samplesheet.fasta
        .map { create_fasta_channel(it) }
-        .set { fasta }

    emit:
    fastq = fastq ?: []       // channel: [ val(meta), [ reads ] ]
    nanopore = nanopore ?: [] // channel: [ val(meta), [ reads ] ]
    fasta = fasta ?: []       // channel: [ val(meta), fasta ]
-    versions = ch_versions    // channel: [ versions.yml ]
-}
-
-// Function to validate input sheet and auto-detect R1/R2
-def check_missing_and_singleend_autodetect(LinkedHashMap row) {
-
-    // Checks not supported by EIDO(?)
-    if ( ( row['fastq_1'] != "" || row['fastq_2'] != "" ) && row['fasta'] != "" ) { exit 1, "[nf-core/taxprofiler] ERROR: FastQ and FastA files cannot be specified together in the same library. Check input samplesheet! Check sample: ${row['sample']}" }
-    if ( row['fastq_1'] == "" && row['fastq_2'] != "" ) { exit 1, "[nf-core/taxprofiler] ERROR: Input samplesheet has a missing fastq_1 when fastq_2 is specified. Check sample: ${row['sample']}" }
-
-    single_end = row['fastq_2'] == "" ? true : false
-    row['single_end'] = single_end
-
-    return row
+    versions = SAMPLESHEET_CHECK.out.versions // channel: [ versions.yml ]
}

// Function to get list of [ meta, [ fastq_1, fastq_2 ] ]
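With the revert, single-end detection lives in `check_samplesheet.py` rather than in the Groovy helper removed above. Summarising the accepted column combinations as the Python code implements them (a paraphrase, not quoted text): `fastq_1` plus `fastq_2` means paired-end (`single_end = 0`); `fastq_1` alone means single-end (`single_end = 1`); `fasta` alone means single-end long reads; `fasta` combined with any FASTQ, or `fastq_2` without `fastq_1`, is rejected as an invalid combination.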
@@ -92,7 +66,6 @@ def create_fastq_channel(LinkedHashMap row) {

    }
    return fastq_meta
-
}

// Function to get list of [ meta, fasta ]
def create_fasta_channel(LinkedHashMap row) {
    def meta = [:]
workflows/taxprofiler.nf

@@ -20,9 +20,8 @@ for (param in checkPathParamList) { if (param) { file(param, checkIfExists: true

// Check mandatory parameters
if ( params.input ) {
    ch_input = file(params.input, checkIfExists: true)
-    pep_input_base_dir = file(params.input).extension.matches("yaml|yml") ? file(file(params.input).getParent(), checkIfExists: true) : []
} else {
-    exit 1, "Input samplesheet, or PEP config and base directory not specified"
+    exit 1, "Input samplesheet not specified"
}

if (params.databases) { ch_databases = file(params.databases, checkIfExists: true) } else { exit 1, 'Input database sheet not specified!' }
@@ -115,7 +114,7 @@ workflow TAXPROFILER {

    SUBWORKFLOW: Read in samplesheet, validate and stage input files
    */
    INPUT_CHECK (
-        ch_input, pep_input_base_dir
+        ch_input
    )
    ch_versions = ch_versions.mix(INPUT_CHECK.out.versions)
|
Loading…
Reference in a new issue