
Merge branch 'nf-core:dev' into update_usage

Sofia Stamouli 2022-12-22 11:09:50 +01:00 committed by GitHub
commit 2f291731c1
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
26 changed files with 353 additions and 417 deletions


@@ -34,9 +34,9 @@ jobs:
        id: prettier_status
        run: |
          if prettier --check ${GITHUB_WORKSPACE}; then
-           echo "name=result::pass" >> $GITHUB_OUTPUT
+           echo "result=pass" >> $GITHUB_OUTPUT
          else
-           echo "name=result::fail" >> $GITHUB_OUTPUT
+           echo "result=fail" >> $GITHUB_OUTPUT
          fi

      - name: Run 'prettier --write'
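For context: GitHub Actions replaced the deprecated `::set-output` command with the `$GITHUB_OUTPUT` file, and the old lines above still carried the `name=result::pass` key format from that mechanism, so the step never actually exposed an output named `result`. A minimal sketch of the corrected idiom (the step id `prettier_status` comes from the hunk above; everything else is illustrative):

```bash
# Illustrative only: each "key=value" line appended to $GITHUB_OUTPUT becomes a
# step output, readable in later steps as steps.prettier_status.outputs.result.
if prettier --check "${GITHUB_WORKSPACE}"; then
  echo "result=pass" >> "$GITHUB_OUTPUT"
else
  echo "result=fail" >> "$GITHUB_OUTPUT"
fi
```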


@@ -18,7 +18,7 @@ jobs:
      - name: Get PR number
        id: pr_number
-       run: echo "name=pr_number::$(cat linting-logs/PR_number.txt)" >> $GITHUB_OUTPUT
+       run: echo "pr_number=$(cat linting-logs/PR_number.txt)" >> $GITHUB_OUTPUT

      - name: Post PR comment
        uses: marocchino/sticky-pull-request-comment@v2


@@ -77,8 +77,6 @@ On release, automated continuous integration tests run the pipeline on a full-si
   nextflow run nf-core/taxprofiler --input samplesheet.csv --databases database.csv --outdir <OUTDIR> --run_<TOOL1> --run_<TOOL2> -profile <docker/singularity/podman/shifter/charliecloud/conda/institute>
   ```

-  Note pipeline supports both CSV and PEP input sample sheets. Find out more [here](http://pep.databio.org/en/2.1.0/specification/).

## Documentation

The nf-core/taxprofiler pipeline comes with documentation about the pipeline [usage](https://nf-co.re/taxprofiler/usage), [parameters](https://nf-co.re/taxprofiler/parameters) and [output](https://nf-co.re/taxprofiler/output).


@@ -1,55 +0,0 @@
description: A schema for validation of samplesheet.csv for taxprofiler pipeline.
imports:
  - https://schema.databio.org/pep/2.1.0.yaml
properties:
  samples:
    type: array
    items:
      type: object
      properties:
        sample:
          type: string
          description: "Sample identifier."
          pattern: "^\\S*$"
        run_accession:
          type: string
          description: "Run accession number."
        instrument_platform:
          type: string
          description: "Name of the platform that sequenced the samples."
          enum:
            [
              "ABI_SOLID",
              "BGISEQ",
              "CAPILLARY",
              "COMPLETE_GENOMICS",
              "DNBSEQ",
              "HELICOS",
              "ILLUMINA",
              "ION_TORRENT",
              "LS454",
              "OXFORD_NANOPORE",
              "PACBIO_SMRT",
            ]
        fastq1:
          type: ["string", "null"]
          description: "Optional FASTQ file for read 1 of paired-end sequenced libraries."
          pattern: "^[\\S]+.(fq\\.gz|fastq\\.gz)$"
        fastq2:
          type: ["string", "null"]
          description: "Optional FASTQ file for read 2 of paired-end sequenced libraries."
          pattern: "^[\\S]+.(fq\\.gz|fastq\\.gz)$"
        fasta:
          type: ["string", "null"]
          description: "Optional FASTA file."
          pattern: "^[\\S]+.(fa\\.gz|fasta\\.gz)$"
      required:
        - sample
        - run_accession
        - instrument_platform
      files:
        - fastq1
        - fastq2
        - fasta
required:
  - samples
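For reference, this deleted schema was consumed by the `EIDO_VALIDATE` step removed further down (which passed `$projectDir/assets/samplesheet_schema.yaml` to eido). A hedged sketch of the equivalent manual check, assuming eido is installed and the file names match this commit:

```bash
# Illustrative only: validate a samplesheet against the (now removed) PEP schema.
eido validate samplesheet.csv -s assets/samplesheet_schema.yaml -e
```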

bin/check_samplesheet.py Executable file

@@ -0,0 +1,233 @@
#!/usr/bin/env python

from distutils import extension
import os
import sys
import errno
import argparse


def parse_args(args=None):
    Description = "Reformat nf-core/taxprofiler samplesheet file and check its contents."
    Epilog = "Example usage: python check_samplesheet.py <FILE_IN> <FILE_OUT>"

    parser = argparse.ArgumentParser(description=Description, epilog=Epilog)
    parser.add_argument("FILE_IN", help="Input samplesheet file.")
    parser.add_argument("FILE_OUT", help="Output file.")
    return parser.parse_args(args)


def make_dir(path):
    if len(path) > 0:
        try:
            os.makedirs(path)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise exception


def print_error(error, context="Line", context_str=""):
    error_str = "ERROR: Please check samplesheet -> {}".format(error)
    if context != "" and context_str != "":
        error_str = "ERROR: Please check samplesheet -> {}\n{}: '{}'".format(
            error, context.strip(), context_str.strip()
        )
    print(error_str)
    sys.exit(1)


def check_samplesheet(file_in, file_out):
    """
    This function checks that the samplesheet follows the following structure:

    sample,run_accession,instrument_platform,fastq_1,fastq_2,fasta
    2611,ERR5766174,ILLUMINA,,,ERX5474930_ERR5766174_1.fa.gz
    2612,ERR5766176,ILLUMINA,ERX5474932_ERR5766176_1.fastq.gz,ERX5474932_ERR5766176_2.fastq.gz,
    2612,ERR5766174,ILLUMINA,ERX5474936_ERR5766180_1.fastq.gz,,
    2613,ERR5766181,ILLUMINA,ERX5474937_ERR5766181_1.fastq.gz,ERX5474937_ERR5766181_2.fastq.gz,
    """

    FQ_EXTENSIONS = (".fq.gz", ".fastq.gz")
    FA_EXTENSIONS = (
        ".fa.gz",
        ".fasta.gz",
        ".fna.gz",
        ".fas.gz",
    )
    INSTRUMENT_PLATFORMS = [
        "ABI_SOLID",
        "BGISEQ",
        "CAPILLARY",
        "COMPLETE_GENOMICS",
        "DNBSEQ",
        "HELICOS",
        "ILLUMINA",
        "ION_TORRENT",
        "LS454",
        "OXFORD_NANOPORE",
        "PACBIO_SMRT",
    ]

    sample_mapping_dict = {}
    with open(file_in, "r") as fin:
        ## Check header
        MIN_COLS = 4
        HEADER = [
            "sample",
            "run_accession",
            "instrument_platform",
            "fastq_1",
            "fastq_2",
            "fasta",
        ]
        header = [x.strip('"') for x in fin.readline().strip().split(",")]

        ## Check for missing mandatory columns
        missing_columns = list(set(HEADER) - set(header))
        if len(missing_columns) > 0:
            print(
                "ERROR: Missing required column header -> {}. Note some columns can otherwise be empty. See pipeline documentation (https://nf-co.re/taxprofiler/usage).".format(
                    ",".join(missing_columns)
                )
            )
            sys.exit(1)

        ## Find locations of mandatory columns
        header_locs = {}
        for i in HEADER:
            header_locs[i] = header.index(i)

        ## Check sample entries
        for line in fin:
            ## Pull out only relevant columns for downstream checking
            line_parsed = [x.strip().strip('"') for x in line.strip().split(",")]

            # Check valid number of columns per row
            if len(line_parsed) < len(HEADER):
                print_error(
                    "Invalid number of columns (minimum = {})!".format(len(HEADER)),
                    "Line",
                    line,
                )

            num_cols = len([x for x in line_parsed if x])
            if num_cols < MIN_COLS:
                print_error(
                    "Invalid number of populated columns (minimum = {})!".format(MIN_COLS),
                    "Line",
                    line,
                )

            lspl = [line_parsed[i] for i in header_locs.values()]

            ## Check sample name entries
            (
                sample,
                run_accession,
                instrument_platform,
                fastq_1,
                fastq_2,
                fasta,
            ) = lspl[: len(HEADER)]
            sample = sample.replace(" ", "_")
            if not sample:
                print_error("Sample entry has not been specified!", "Line", line)

            ## Check FastQ file extension
            for fastq in [fastq_1, fastq_2]:
                if fastq:
                    if fastq.find(" ") != -1:
                        print_error("FastQ file contains spaces!", "Line", line)
                    if not fastq.endswith(FQ_EXTENSIONS):
                        print_error(
                            f"FastQ file does not have extension {' or '.join(list(FQ_EXTENSIONS))} !",
                            "Line",
                            line,
                        )
            if fasta:
                if fasta.find(" ") != -1:
                    print_error("FastA file contains spaces!", "Line", line)
                if not fasta.endswith(FA_EXTENSIONS):
                    print_error(
                        f"FastA file does not have extension {' or '.join(list(FA_EXTENSIONS))}!",
                        "Line",
                        line,
                    )
            sample_info = []

            # Check run_accession
            if not run_accession:
                print_error("Run accession has not been specified!", "Line", line)
            else:
                sample_info.append(run_accession)

            # Check instrument_platform
            if not instrument_platform:
                print_error("Instrument platform has not been specified!", "Line", line)
            else:
                if instrument_platform not in INSTRUMENT_PLATFORMS:
                    print_error(
                        f"Instrument platform {instrument_platform} is not supported! "
                        f"List of supported platforms {', '.join(INSTRUMENT_PLATFORMS)}",
                        "Line",
                        line,
                    )
                sample_info.append(instrument_platform)

            ## Auto-detect paired-end/single-end
            if sample and fastq_1 and fastq_2:  ## Paired-end short reads
                sample_info.extend(["0", fastq_1, fastq_2, fasta])
            elif sample and fastq_1 and not fastq_2:  ## Single-end short/long fastq reads
                sample_info.extend(["1", fastq_1, fastq_2, fasta])
            elif sample and fasta and not fastq_1 and not fastq_2:  ## Single-end long reads
                sample_info.extend(["1", fastq_1, fastq_2, fasta])
            elif fasta and (fastq_1 or fastq_2):
                print_error(
                    "FastQ and FastA files cannot be specified together in the same library!",
                    "Line",
                    line,
                )
            else:
                print_error("Invalid combination of columns provided!", "Line", line)

            ## Create sample mapping dictionary = { sample: [ run_accession, instrument_platform, single_end, fastq_1, fastq_2 , fasta ] }
            if sample not in sample_mapping_dict:
                sample_mapping_dict[sample] = [sample_info]
            else:
                if sample_info in sample_mapping_dict[sample]:
                    print_error("Samplesheet contains duplicate rows!", "Line", line)
                else:
                    sample_mapping_dict[sample].append(sample_info)

    ## Write validated samplesheet with appropriate columns
    HEADER_OUT = [
        "sample",
        "run_accession",
        "instrument_platform",
        "single_end",
        "fastq_1",
        "fastq_2",
        "fasta",
    ]
    if len(sample_mapping_dict) > 0:
        out_dir = os.path.dirname(file_out)
        make_dir(out_dir)
        with open(file_out, "w") as fout:
            fout.write(",".join(HEADER_OUT) + "\n")

            for sample in sorted(sample_mapping_dict.keys()):
                for idx, val in enumerate(sample_mapping_dict[sample]):
                    fout.write(f"{sample},{','.join(val)}\n")
    else:
        print_error("No entries to process!", "Samplesheet: {}".format(file_in))


def main(args=None):
    args = parse_args(args)
    check_samplesheet(args.FILE_IN, args.FILE_OUT)


if __name__ == "__main__":
    sys.exit(main())
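The script takes an input and an output path, exactly as the `Epilog` above states, and writes a validated sheet with an extra `single_end` column auto-detected from the presence of `fastq_2`. A small sketch of a local run, reusing rows from the docstring (paths are placeholders; the script checks extensions, not file existence):

```bash
# Sketch only: writes samplesheet.valid.csv with the added single_end column.
cat > samplesheet.csv <<'EOF'
sample,run_accession,instrument_platform,fastq_1,fastq_2,fasta
2611,ERR5766174,ILLUMINA,,,ERX5474930_ERR5766174_1.fa.gz
2612,ERR5766176,ILLUMINA,ERX5474932_ERR5766176_1.fastq.gz,ERX5474932_ERR5766176_2.fastq.gz,
EOF
python bin/check_samplesheet.py samplesheet.csv samplesheet.valid.csv
```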


@@ -552,12 +552,4 @@ process {
            saveAs: { filename -> filename.equals('versions.yml') ? null : filename }
        ]
    }

-   withName: 'EIDO_VALIDATE' {
-       ext.args = '--st-index sample'
-   }
-
-   withName: 'EIDO_CONVERT' {
-       ext.args = '--st-index sample'
-   }
}


@@ -60,10 +60,4 @@ process {
    withName: MEGAN_RMA2INFO_KRONA {
        maxForks = 1
    }

-   withName: 'EIDO_VALIDATE' {
-       ext.args = '--st-index sample'
-   }
-
-   withName: 'EIDO_CONVERT' {
-       ext.args = '--st-index sample'
-   }
}


@@ -63,10 +63,4 @@ process {
    withName: MEGAN_RMA2INFO_KRONA {
        maxForks = 1
    }

-   withName: 'EIDO_VALIDATE' {
-       ext.args = '--st-index sample'
-   }
-
-   withName: 'EIDO_CONVERT' {
-       ext.args = '--st-index sample'
-   }
}


@@ -1,45 +0,0 @@
params {
    config_profile_name        = 'Test PEP profile'
    config_profile_description = 'Minimal test dataset to check pipeline function with PEP file as an input.'

    // Limit resources so that this can run on GitHub Actions
    max_cpus   = 2
    max_memory = '6.GB'
    max_time   = '6.h'

    // Input data
    input     = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/delete_me/pep/test_pep_format_files/config.yaml'
    databases = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/database.csv'
    perform_shortread_qc               = true
    perform_longread_qc                = true
    perform_shortread_complexityfilter = true
    perform_shortread_hostremoval      = true
    perform_longread_hostremoval       = true
    perform_runmerging                 = true
    hostremoval_reference              = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/homo_sapiens/genome/genome.fasta'
    run_kaiju      = true
    run_kraken2    = true
    run_bracken    = true
    run_malt       = true
    run_metaphlan3 = true
    run_centrifuge = true
    run_diamond    = true
    run_motus      = false
    run_krona      = true
    krona_taxonomy_directory = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/sarscov2/metagenome/krona_taxonomy.tab'
    malt_save_reads       = true
    kraken2_save_reads    = true
    centrifuge_save_reads = true
    diamond_save_reads    = true
}

process {
    withName: MALT_RUN {
        maxForks = 1
        ext.args = { "-m ${params.malt_mode} -J-Xmx12G" }
    }
    withName: MEGAN_RMA2INFO {
        maxForks = 1
    }
}
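Its `includeConfig` hook in `nextflow.config` is dropped later in this commit, so a PEP-based smoke test of the pipeline could previously be launched along these lines (a sketch; the container profile is illustrative):

```bash
# No longer possible after this commit: the test_pep profile is removed.
nextflow run nf-core/taxprofiler -profile test_pep,docker --outdir <OUTDIR>
```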


@@ -12,7 +12,7 @@
nf-core/taxprofiler can accept as input raw or preprocessed single- or paired-end short-read (e.g. Illumina) FASTQ files, long-read FASTQ files (e.g. Oxford Nanopore), or FASTA sequences (available for a subset of profilers).

-> ⚠️ Input FASTQ files _must_ be gzipped, while FASTA files may optionally be uncompressed (although this is not recommended)
+> ⚠️ Input FASTQ and FASTA files _must_ be gzipped

You will need to create a samplesheet with information about the samples you would like to analyse before running the pipeline. Use this parameter to specify its location. It has to be a comma-separated file with 6 columns, and a header row as shown in the examples below. Furthermore, nf-core/taxprofiler also requires a second comma-separated file of 3 columns with a header row as in the examples below.

@@ -22,10 +22,6 @@ This samplesheet is then specified on the command line as follows:
--input '[path to samplesheet file]' --databases '[path to database sheet file]'
```

-Note pipeline supports both CSV and PEP input sample sheets. Find out more [here](http://pep.databio.org/en/2.1.0/specification/).
-When using PEP as an input, the `samplesheet.csv` must be placed in the same folder
-as `config.yaml` file. A path to `samplesheet.csv` within the config must be absolute.

### Multiple runs of the same sample

The `sample` identifiers have to be the same when you have re-sequenced the same sample more than once e.g. to increase sequencing depth. The pipeline will concatenate different runs' FASTQ files of the same sample before performing profiling, when `--perform_runmerging` is supplied. Below is an example for the same sample sequenced across 3 lanes:

@@ -271,9 +267,6 @@ If `-profile` is not specified, the pipeline will run locally and expect all sof
- `test`
  - A profile with a complete configuration for automated testing
  - Includes links to test data so needs no other parameters
- - `test_pep`
-   - A profile with a complete configuration for running a pipeline with PEP as input
-   - Includes links to test data so needs no other parameters
- `docker`
  - A generic configuration profile to be used with [Docker](https://docker.com/)
- `singularity`


@@ -72,7 +72,7 @@ class WorkflowMain {
        NfcoreTemplate.checkConfigProvided(workflow, log)

        // Check that conda channels are set-up correctly
-       if (params.enable_conda) {
+       if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) {
            Utils.checkCondaChannels(log)
        }

@@ -81,7 +81,7 @@ class WorkflowMain {
        // Check input has been provided
        if (!params.input) {
-           log.error "Please provide an input samplesheet or PEP to the pipeline e.g. '--input samplesheet.csv'"
+           log.error "Please provide an input samplesheet to the pipeline e.g. '--input samplesheet.csv'"
            System.exit(1)
        }
    }
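The same retirement of `params.enable_conda` recurs below in `nextflow.config` and `nextflow_schema.json`: whether Conda or Mamba is used is now inferred solely from the runtime profile. In practice the switch moves from a pipeline parameter to the profile flag, e.g. (an illustrative invocation, not taken from this commit):

```bash
# Previously: nextflow run nf-core/taxprofiler ... --enable_conda
# Now the conda profile alone enables Conda and triggers the channel check above:
nextflow run nf-core/taxprofiler -profile test,conda --outdir <OUTDIR>
```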


@@ -52,7 +52,7 @@
        },
        "custom/dumpsoftwareversions": {
            "branch": "master",
-           "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905",
+           "git_sha": "c8e35eb2055c099720a75538d1b8adb3fb5a464c",
            "installed_by": ["modules"]
        },
        "diamond/blastx": {
@@ -60,16 +60,6 @@
            "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905",
            "installed_by": ["modules"]
        },
-       "eido/convert": {
-           "branch": "master",
-           "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905",
-           "installed_by": ["modules"]
-       },
-       "eido/validate": {
-           "branch": "master",
-           "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905",
-           "installed_by": ["modules"]
-       },
        "falco": {
            "branch": "master",
            "git_sha": "fc959214036403ad83efe7a41d43d0606c445cda",
@@ -83,7 +73,7 @@
        },
        "fastqc": {
            "branch": "master",
-           "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905",
+           "git_sha": "c8e35eb2055c099720a75538d1b8adb3fb5a464c",
            "installed_by": ["modules"]
        },
        "filtlong": {
@@ -128,7 +118,7 @@
        },
        "krakenuniq/preloadedkrakenuniq": {
            "branch": "master",
-           "git_sha": "05649975c6611c6e007537a7984e186e12ae03af",
+           "git_sha": "a6eb17f65b3ee5761c25c075a6166c9f76733cee",
            "installed_by": ["modules"]
        },
        "krona/ktimporttaxonomy": {
@@ -183,7 +173,7 @@
        },
        "multiqc": {
            "branch": "master",
-           "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905",
+           "git_sha": "c8e35eb2055c099720a75538d1b8adb3fb5a464c",
            "installed_by": ["modules"]
        },
        "porechop/porechop": {


@@ -0,0 +1,27 @@
process SAMPLESHEET_CHECK {
    tag "$samplesheet"

    conda "conda-forge::python=3.8.3"
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/python:3.8.3' :
        'quay.io/biocontainers/python:3.8.3' }"

    input:
    path samplesheet

    output:
    path '*.csv'       , emit: csv
    path "versions.yml", emit: versions

    script: // This script is bundled with the pipeline, in nf-core/taxprofiler/bin/
    """
    check_samplesheet.py \\
        $samplesheet \\
        samplesheet.valid.csv

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        python: \$(python --version | sed 's/Python //g')
    END_VERSIONS
    """
}


@@ -2,7 +2,7 @@ process CUSTOM_DUMPSOFTWAREVERSIONS {
    label 'process_single'

    // Requires `pyyaml` which does not have a dedicated container but is in the MultiQC container
-   conda (params.enable_conda ? 'bioconda::multiqc=1.13' : null)
+   conda "bioconda::multiqc=1.13"
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/multiqc:1.13--pyhdfd78af_0' :
        'quay.io/biocontainers/multiqc:1.13--pyhdfd78af_0' }"


@@ -1,11 +1,17 @@
#!/usr/bin/env python
-import yaml

+"""Provide functions to merge multiple versions.yml files."""

import platform
from textwrap import dedent

+import yaml


def _make_versions_html(versions):
+   """Generate a tabular HTML output of all versions for MultiQC."""
    html = [
        dedent(
            """\\

@@ -44,47 +50,53 @@ def _make_versions_html(versions):
    return "\\n".join(html)

(The previously module-level code below is unchanged apart from being wrapped in the new main() function and indented one level.)

+def main():
+    """Load all version files and generate merged output."""

    versions_this_module = {}
    versions_this_module["${task.process}"] = {
        "python": platform.python_version(),
        "yaml": yaml.__version__,
    }

    with open("$versions") as f:
        versions_by_process = yaml.load(f, Loader=yaml.BaseLoader) | versions_this_module

    # aggregate versions by the module name (derived from fully-qualified process name)
    versions_by_module = {}
    for process, process_versions in versions_by_process.items():
        module = process.split(":")[-1]
        try:
            if versions_by_module[module] != process_versions:
                raise AssertionError(
                    "We assume that software versions are the same between all modules. "
                    "If you see this error-message it means you discovered an edge-case "
                    "and should open an issue in nf-core/tools. "
                )
        except KeyError:
            versions_by_module[module] = process_versions

    versions_by_module["Workflow"] = {
        "Nextflow": "$workflow.nextflow.version",
        "$workflow.manifest.name": "$workflow.manifest.version",
    }

    versions_mqc = {
        "id": "software_versions",
        "section_name": "${workflow.manifest.name} Software Versions",
        "section_href": "https://github.com/${workflow.manifest.name}",
        "plot_type": "html",
        "description": "are collected at run time from the software output.",
        "data": _make_versions_html(versions_by_module),
    }

    with open("software_versions.yml", "w") as f:
        yaml.dump(versions_by_module, f, default_flow_style=False)
    with open("software_versions_mqc.yml", "w") as f:
        yaml.dump(versions_mqc, f, default_flow_style=False)

    with open("versions.yml", "w") as f:
        yaml.dump(versions_this_module, f, default_flow_style=False)

+if __name__ == "__main__":
+    main()


@@ -1,38 +0,0 @@
process EIDO_CONVERT {
    tag "$samplesheet"
    label 'process_single'

    conda (params.enable_conda ? "conda-forge::eido=0.1.9" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://containers.biocontainers.pro/s3/SingImgsRepo/eido/0.1.9_cv1/eido_0.1.9_cv1.sif' :
        'biocontainers/eido:0.1.9_cv1' }"

    input:
    path samplesheet
    val format
    path pep_input_base_dir

    output:
    path "versions.yml"       , emit: versions
    path "${prefix}.${format}", emit: samplesheet_converted

    when:
    task.ext.when == null || task.ext.when

    script:
    def args = task.ext.args ?: ''
    prefix = task.ext.prefix ?: "samplesheet_converted"
    """
    eido \\
        convert \\
        -f $format \\
        $samplesheet \\
        $args \\
        -p samples=${prefix}.${format}

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        eido: \$(echo \$(eido --version 2>&1) | sed 's/^.*eido //;s/ .*//' ))
    END_VERSIONS
    """
}


@@ -1,39 +0,0 @@
name: "eido_convert"
description: Convert any PEP project or Nextflow samplesheet to any format
keywords:
  - eido
  - convert
  - PEP
  - format
  - samplesheet
tools:
  - "eido":
      description: "Convert any PEP project or Nextflow samplesheet to any format"
      homepage: "http://eido.databio.org/en/latest/"
      documentation: "http://eido.databio.org/en/latest/"
      doi: "10.1093/gigascience/giab077"
      licence: "BSD-2-Clause"
input:
  - samplesheet:
      type: file
      description: Nextflow samplesheet or PEP project
      pattern: "*.{yaml,yml,csv}"
  - format:
      type: value
      description: Extension of an output file
  - pep_input_base_dir:
      type: file
      description: Optional path to the directory where files specified in a PEP config file are stored. Any paths specified in the config will need to be relative to this base directory.
output:
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"
  - samplesheet_converted:
      type: file
      description: PEP project or samplesheet converted to csv file
authors:
  - "@rafalstepien"


@@ -1,33 +0,0 @@
process EIDO_VALIDATE {
    tag "$samplesheet"
    label 'process_single'

    conda (params.enable_conda ? "conda-forge::eido=0.1.9" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://containers.biocontainers.pro/s3/SingImgsRepo/eido/0.1.9_cv2/eido_0.1.9_cv2.sif' :
        'biocontainers/eido:0.1.9_cv2' }"

    input:
    path samplesheet
    path schema
    path pep_input_base_dir

    output:
    path "versions.yml", emit: versions
    path "*.log"       , emit: log

    when:
    task.ext.when == null || task.ext.when

    script:
    def args = task.ext.args ?: ''
    def prefix = task.ext.prefix ?: "validation"
    """
    eido validate $args $samplesheet -s $schema -e > ${prefix}.log

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        eido: \$(echo \$(eido --version 2>&1) | sed 's/^.*eido //;s/ .*//' ))
    END_VERSIONS
    """
}


@@ -1,41 +0,0 @@
name: "eido_validate"
description: Validate samplesheet or PEP config against a schema
keywords:
  - eido
  - validate
  - schema
  - format
  - pep
tools:
  - "validate":
      description: "Validate samplesheet or PEP config against a schema."
      homepage: "http://eido.databio.org/en/latest/"
      documentation: "http://eido.databio.org/en/latest/"
      doi: "10.1093/gigascience/giab077"
      licence: "BSD-2-Clause"
input:
  - samplesheet:
      type: file
      description: Samplesheet or PEP file to be validated
      pattern: "*.{yaml,yml,csv}"
  - schema:
      type: file
      description: Schema that the samplesheet will be validated against
      pattern: "*.{yaml,yml}"
  - pep_input_base_dir:
      type: file
      description: Optional path to the directory where files specified in a PEP config file are stored. Any paths specified in the config will need to be relative to this base directory.
output:
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"
  - log:
      type: file
      description: File containing validation log.
      pattern: "*.log"
authors:
  - "@rafalstepien"


@@ -2,7 +2,7 @@ process FASTQC {
    tag "$meta.id"
    label 'process_medium'

-   conda (params.enable_conda ? "bioconda::fastqc=0.11.9" : null)
+   conda "bioconda::fastqc=0.11.9"
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/fastqc:0.11.9--0' :
        'quay.io/biocontainers/fastqc:0.11.9--0' }"

@@ -20,30 +20,22 @@ process FASTQC {
    script:
    def args = task.ext.args ?: ''
-   // Add soft-links to original FastQs for consistent naming in pipeline
    def prefix = task.ext.prefix ?: "${meta.id}"
-   if (meta.single_end) {
-       """
-       [ ! -f ${prefix}.fastq.gz ] && ln -s $reads ${prefix}.fastq.gz
-       fastqc $args --threads $task.cpus ${prefix}.fastq.gz
-       """
+   // Make list of old name and new name pairs to use for renaming in the bash while loop
+   def old_new_pairs = reads instanceof Path || reads.size() == 1 ? [[ reads, "${prefix}.${reads.extension}" ]] : reads.withIndex().collect { entry, index -> [ entry, "${prefix}_${index + 1}.${entry.extension}" ] }
+   def rename_to = old_new_pairs*.join(' ').join(' ')
+   def renamed_files = old_new_pairs.collect{ old_name, new_name -> new_name }.join(' ')
+   """
+   printf "%s %s\\n" $rename_to | while read old_name new_name; do
+       [ -f "\${new_name}" ] || ln -s \$old_name \$new_name
+   done
+   fastqc $args --threads $task.cpus $renamed_files

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        fastqc: \$( fastqc --version | sed -e "s/FastQC v//g" )
    END_VERSIONS
    """
-   } else {
-       """
-       [ ! -f ${prefix}_1.fastq.gz ] && ln -s ${reads[0]} ${prefix}_1.fastq.gz
-       [ ! -f ${prefix}_2.fastq.gz ] && ln -s ${reads[1]} ${prefix}_2.fastq.gz
-       fastqc $args --threads $task.cpus ${prefix}_1.fastq.gz ${prefix}_2.fastq.gz
-
-       cat <<-END_VERSIONS > versions.yml
-       "${task.process}":
-           fastqc: \$( fastqc --version | sed -e "s/FastQC v//g" )
-       END_VERSIONS
-       """
-   }

    stub:
    def prefix = task.ext.prefix ?: "${meta.id}"
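The single-end/paired-end branches above are replaced by one renaming loop. A small sketch of that bash idiom outside Nextflow (file names are hypothetical): `printf` re-applies the `"%s %s\n"` format to each pair of arguments, emitting one `old new` pair per line for the `while read` loop.

```bash
# Hypothetical file names; mirrors the renaming loop in the new FASTQC script block.
printf "%s %s\n" sample_R1.fq.gz sample_1.fastq.gz sample_R2.fq.gz sample_2.fastq.gz |
    while read -r old_name new_name; do
        [ -f "${new_name}" ] || ln -s "${old_name}" "${new_name}"
    done
```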


@@ -2,10 +2,10 @@ process KRAKENUNIQ_PRELOADEDKRAKENUNIQ {
    tag "$meta.id"
    label 'process_high'

-   conda (params.enable_conda ? "bioconda::krakenuniq=1.0.0" : null)
+   conda "bioconda::krakenuniq=1.0.2"
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-       'https://depot.galaxyproject.org/singularity/krakenuniq:1.0.0--pl5321h19e8d03_0':
-       'quay.io/biocontainers/krakenuniq:1.0.0--pl5321h19e8d03_0' }"
+       'https://depot.galaxyproject.org/singularity/krakenuniq:1.0.2--pl5321h19e8d03_0':
+       'quay.io/biocontainers/krakenuniq:1.0.2--pl5321h19e8d03_0' }"

    input:
    tuple val(meta), path(fastqs)


@@ -1,7 +1,7 @@
process MULTIQC {
    label 'process_single'

-   conda (params.enable_conda ? 'bioconda::multiqc=1.13' : null)
+   conda "bioconda::multiqc=1.13"
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/multiqc:1.13--pyhdfd78af_0' :
        'quay.io/biocontainers/multiqc:1.13--pyhdfd78af_0' }"


@@ -39,7 +39,6 @@ params {
    validate_params      = true
    show_hidden_params   = false
    schema_ignore_params = 'genomes,fasta'
-   enable_conda         = false

    // Config options
@@ -182,7 +181,6 @@ try {
profiles {
    debug { process.beforeScript = 'echo $HOSTNAME' }
    conda {
-       params.enable_conda = true
        conda.enabled       = true
        docker.enabled      = false
        singularity.enabled = false
@@ -191,7 +189,6 @@ profiles {
        charliecloud.enabled = false
    }
    mamba {
-       params.enable_conda = true
        conda.enabled       = true
        conda.useMamba      = true
        docker.enabled      = false
@@ -252,7 +249,6 @@ profiles {
    test_nothing    { includeConfig 'conf/test_nothing.config' }
    test_motus      { includeConfig 'conf/test_motus.config' }
    test_krakenuniq { includeConfig 'conf/test_krakenuniq.config' }
-   test_pep        { includeConfig 'conf/test_pep.config' }
}


@@ -696,12 +696,6 @@
            "description": "Show all params when using `--help`",
            "hidden": true,
            "help_text": "By default, parameters set as _hidden_ in the schema are not shown on the command line when a user runs with `--help`. Specifying this option will tell the pipeline to show all parameters."
-       },
-       "enable_conda": {
-           "type": "boolean",
-           "description": "Run this workflow with Conda. You can also use '-profile conda' instead of providing this parameter.",
-           "hidden": true,
-           "fa_icon": "fas fa-bacon"
        }
    }
},


@@ -2,62 +2,36 @@
// Check input samplesheet and get read channels
//

-include { EIDO_VALIDATE } from '../../modules/nf-core/eido/validate/main'
-include { EIDO_CONVERT  } from '../../modules/nf-core/eido/convert/main'
+include { SAMPLESHEET_CHECK } from '../../modules/local/samplesheet_check'

workflow INPUT_CHECK {
    take:
-   samplesheet_or_pep_config // file: /path/to/samplesheet.csv or /path/to/pep/config.yaml
-   pep_input_base_dir
+   samplesheet // file: /path/to/samplesheet.csv

    main:
-   ch_versions = Channel.empty()
-
-   EIDO_VALIDATE ( samplesheet_or_pep_config, file("$projectDir/assets/samplesheet_schema.yaml"), pep_input_base_dir )
-   ch_versions = ch_versions.mix(EIDO_VALIDATE.out.versions)
-
-   EIDO_CONVERT ( samplesheet_or_pep_config, "csv", pep_input_base_dir )
-   ch_versions = ch_versions.mix(EIDO_CONVERT.out.versions)
-
-   ch_parsed_samplesheet = EIDO_CONVERT.out.samplesheet_converted
+   parsed_samplesheet = SAMPLESHEET_CHECK ( samplesheet )
+       .csv
        .splitCsv ( header:true, sep:',' )
-       .map { check_missing_and_singleend_autodetect(it) }
        .branch {
            fasta: it['fasta'] != ''
            nanopore: it['instrument_platform'] == 'OXFORD_NANOPORE'
            fastq: true
        }

-   ch_parsed_samplesheet.fastq
+   fastq = parsed_samplesheet.fastq
        .map { create_fastq_channel(it) }
-       .set { fastq }

-   ch_parsed_samplesheet.nanopore
+   nanopore = parsed_samplesheet.nanopore
        .map { create_fastq_channel(it) }
-       .set { nanopore }

-   ch_parsed_samplesheet.fasta
+   fasta = parsed_samplesheet.fasta
        .map { create_fasta_channel(it) }
-       .set { fasta }

    emit:
    fastq = fastq ?: []       // channel: [ val(meta), [ reads ] ]
    nanopore = nanopore ?: [] // channel: [ val(meta), [ reads ] ]
    fasta = fasta ?: []       // channel: [ val(meta), fasta ]
-   versions = ch_versions                       // channel: [ versions.yml ]
+   versions = SAMPLESHEET_CHECK.out.versions    // channel: [ versions.yml ]
}

-// Function to validate input sheet and auto-detect R1/R2
-def check_missing_and_singleend_autodetect(LinkedHashMap row) {
-
-   // Checks not supported by EIDO(?)
-   if ( ( row['fastq_1'] != "" || row['fastq_2'] != "" ) && row['fasta'] != "" ) { exit 1, "[nf-core/taxprofiler] ERROR: FastQ and FastA files cannot be specified together in the same library. Check input samplesheet! Check sample: ${row['sample']}" }
-   if ( row['fastq_1'] == "" && row['fastq_2'] != "" ) { exit 1, "[nf-core/taxprofiler] ERROR: Input samplesheet has a missing fastq_1 when fastq_2 is specified. Check sample: ${row['sample']}" }
-
-   single_end = row['fastq_2'] == "" ? true : false
-   row['single_end'] = single_end
-
-   return row
-}

// Function to get list of [ meta, [ fastq_1, fastq_2 ] ]
@@ -87,12 +61,11 @@ def create_fastq_channel(LinkedHashMap row) {
        if (!file(row.fastq_2).exists()) {
            exit 1, "ERROR: Please check input samplesheet -> Read 2 FastQ file does not exist!\n${row.fastq_2}"
        }
        fastq_meta = [ meta, [ file(row.fastq_1), file(row.fastq_2) ] ]
    }
    return fastq_meta
}

// Function to get list of [ meta, fasta ]
def create_fasta_channel(LinkedHashMap row) {
    def meta = [:]


@@ -20,9 +20,8 @@ for (param in checkPathParamList) { if (param) { file(param, checkIfExists: true
// Check mandatory parameters
if ( params.input ) {
    ch_input = file(params.input, checkIfExists: true)
-   pep_input_base_dir = file(params.input).extension.matches("yaml|yml") ? file(file(params.input).getParent(), checkIfExists: true) : []
} else {
-   exit 1, "Input samplesheet, or PEP config and base directory not specified"
+   exit 1, "Input samplesheet not specified"
}

if (params.databases) { ch_databases = file(params.databases, checkIfExists: true) } else { exit 1, 'Input database sheet not specified!' }

@@ -115,7 +114,7 @@ workflow TAXPROFILER {
        SUBWORKFLOW: Read in samplesheet, validate and stage input files
    */
    INPUT_CHECK (
-       ch_input, pep_input_base_dir
+       ch_input
    )
    ch_versions = ch_versions.mix(INPUT_CHECK.out.versions)

@@ -251,7 +250,7 @@ workflow TAXPROFILER {
    */
    CUSTOM_DUMPSOFTWAREVERSIONS (
-       ch_versions.unique{ it.text }.collectFile(name: 'collated_versions.yml')
+       ch_versions.unique().collectFile(name: 'collated_versions.yml')
    )