Merge branch 'master' into tool/elprep-merge

Commit a1757897cb by Matthias De Smet, 2022-04-22 10:20:59 +02:00 (committed by GitHub)
49 changed files with 974 additions and 55 deletions

modules/amplify/predict/main.nf
@@ -0,0 +1,41 @@
def VERSION = '1.0.3' // Version information not provided by tool
process AMPLIFY_PREDICT {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::amplify=1.0.3" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/amplify:1.0.3--py36hdfd78af_0':
'quay.io/biocontainers/amplify:1.0.3--py36hdfd78af_0' }"
input:
tuple val(meta), path(faa)
path(model_dir)
output:
tuple val(meta), path('*.tsv'), emit: tsv
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def custom_model_dir = model_dir ? "-md ${model_dir}" : ""
"""
AMPlify \\
$args \\
${custom_model_dir} \\
-s '${faa}'
# rename output, because the tool includes date and time in the file name
mv *.tsv ${prefix}.tsv
cat <<-END_VERSIONS > versions.yml
"${task.process}":
AMPlify: $VERSION
END_VERSIONS
"""
}
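Like other nf-core modules, AMPLIFY_PREDICT picks up `task.ext.args` and `task.ext.prefix` from the pipeline configuration rather than from edits to the module itself. A minimal sketch of such a config (the prefix value is illustrative, not part of this commit):

```nextflow
process {
    withName: AMPLIFY_PREDICT {
        // emit e.g. test.amplify.tsv instead of test.tsv (illustrative)
        ext.prefix = { "${meta.id}.amplify" }
    }
}
```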

modules/amplify/predict/meta.yml
@@ -0,0 +1,47 @@
name: "amplify_predict"
description: AMPlify is an attentive deep learning model for antimicrobial peptide prediction.
keywords:
- antimicrobial peptides
- AMPs
- prediction
- model
tools:
- "amplify":
description: "Attentive deep learning model for antimicrobial peptide prediction"
homepage: "https://github.com/bcgsc/AMPlify"
documentation: "https://github.com/bcgsc/AMPlify"
tool_dev_url: "https://github.com/bcgsc/AMPlify"
doi: "https://doi.org/10.1186/s12864-022-08310-4"
licence: "['GPL v3']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- faa:
type: file
description: amino acid sequences fasta
pattern: "*.{fa,fa.gz,faa,faa.gz,fasta,fasta.gz}"
- model_dir:
type: directory
description: Directory where models are stored (optional)
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- tsv:
type: file
description: amino acid sequences with prediction (AMP, non-AMP) and probability scores
pattern: "*.{tsv}"
authors:
- "@louperelo"

modules/antismash/antismashlitedownloaddatabases/main.nf
@@ -0,0 +1,46 @@
process ANTISMASH_ANTISMASHLITEDOWNLOADDATABASES {
label 'process_low'
conda (params.enable_conda ? "bioconda::antismash-lite=6.0.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/antismash-lite:6.0.1--pyhdfd78af_1' :
'quay.io/biocontainers/antismash-lite:6.0.1--pyhdfd78af_1' }"
/*
These files are normally downloaded by download-antismash-databases itself, and must be retrieved for input by manually running the command with conda or a standalone installation of antiSMASH. Therefore we do not recommend using this module for production pipelines, but rather require users to specify their own local copy of the antiSMASH database in pipelines. This is solely for use for CI tests of the nf-core/module version of antiSMASH.
Reason: Upon execution, the tool checks if certain database files are present within the container and if not, it tries to create them in /usr/local/bin, for which only root user has write permissions. Mounting those database files with this module prevents the tool from trying to create them.
*/
containerOptions {
workflow.containerEngine == 'singularity' ?
"-B $database_css:/usr/local/lib/python3.8/site-packages/antismash/outputs/html/css,$database_detection:/usr/local/lib/python3.8/site-packages/antismash/detection,$database_modules:/usr/local/lib/python3.8/site-packages/antismash/modules" :
workflow.containerEngine == 'docker' ?
"-v \$PWD/$database_css:/usr/local/lib/python3.8/site-packages/antismash/outputs/html/css -v \$PWD/$database_detection:/usr/local/lib/python3.8/site-packages/antismash/detection -v \$PWD/$database_modules:/usr/local/lib/python3.8/site-packages/antismash/modules" :
''
}
input:
path database_css
path database_detection
path database_modules
output:
path("antismash_db") , emit: database
path "versions.yml", emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
"""
download-antismash-databases \\
--database-dir antismash_db \\
$args
cat <<-END_VERSIONS > versions.yml
"${task.process}":
antismash: \$(antismash --version | sed 's/antiSMASH //')
END_VERSIONS
"""
}

modules/antismash/antismashlitedownloaddatabases/meta.yml
@@ -0,0 +1,55 @@
name: antismash_antismashlitedownloaddatabases
description: antiSMASH allows the rapid genome-wide identification, annotation and analysis of secondary metabolite biosynthesis gene clusters. This module downloads the antiSMASH databases.
keywords:
- secondary metabolites
- BGC
- biosynthetic gene cluster
- genome mining
- NRPS
- RiPP
- antibiotics
- prokaryotes
- bacteria
- eukaryotes
- fungi
- antismash
- database
tools:
- antismash:
description: antiSMASH - the antibiotics and Secondary Metabolite Analysis SHell
homepage: https://docs.antismash.secondarymetabolites.org
documentation: https://docs.antismash.secondarymetabolites.org
tool_dev_url: https://github.com/antismash/antismash
doi: "10.1093/nar/gkab335"
licence: ["AGPL v3"]
input:
- database_css:
type: directory
description: |
antismash/outputs/html/css folder which is created during the antiSMASH database downloading step. These files are normally downloaded by download-antismash-databases itself, and must be retrieved by the user by manually running the command with conda or a standalone installation of antiSMASH. Therefore we do not recommend using this module for production pipelines, but rather require users to specify their own local copy of the antiSMASH database in pipelines.
pattern: "css"
- database_detection:
type: directory
description: |
antismash/detection folder which is created during the antiSMASH database downloading step. These files are normally downloaded by download-antismash-databases itself, and must be retrieved by the user by manually running the command with conda or a standalone installation of antiSMASH. Therefore we do not recommend using this module for production pipelines, but rather require users to specify their own local copy of the antiSMASH database in pipelines.
pattern: "detection"
- database_modules:
type: directory
description: |
antismash/modules folder which is created during the antiSMASH database downloading step. These files are normally downloaded by download-antismash-databases itself, and must be retrieved by the user by manually running the command with conda or a standalone installation of antiSMASH. Therefore we do not recommend using this module for production pipelines, but rather require users to specify their own local copy of the antiSMASH database in pipelines.
pattern: "modules"
output:
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- database:
type: directory
description: Download directory for antiSMASH databases
pattern: "antismash_db"
authors:
- "@jasmezz"

modules/bclconvert/.gitignore
@@ -0,0 +1,2 @@
bcl-convert
*.rpm

modules/bclconvert/Dockerfile
@@ -0,0 +1,15 @@
# Dockerfile to create container with bcl-convert
# Push to nfcore/bclconvert:<VER>
FROM debian:bullseye-slim
LABEL authors="Matthias De Smet <matthias.desmet@ugent.be>" \
description="Docker image containing bcl-convert"
# Disclaimer: this container is not provided nor supported by Illumina
# 'ps' command is needed by some Nextflow executions to collect system stats
# Install procps and clean apt cache
RUN apt-get update \
&& apt-get install -y \
procps \
&& apt-get clean -y && rm -rf /var/lib/apt/lists/*
COPY bcl-convert /usr/local/bin/bcl-convert
RUN chmod +x /usr/local/bin/bcl-convert

modules/bclconvert/LICENSE
@@ -0,0 +1,30 @@
ILLUMINA END-USER SOFTWARE LICENSE AGREEMENT
IMPORTANT-READ CAREFULLY. THIS IS A LICENSE AGREEMENT THAT YOU ARE REQUIRED TO ACCEPT BEFORE DOWNLOADING, INSTALLING AND USING ANY SOFTWARE MADE AVAILABLE FROM THE ILLUMINA SUPPORT CENTER (https://support.illumina.com).
CAREFULLY READ ALL THE TERMS AND CONDITIONS OF THIS LICENSE AGREEMENT BEFORE PROCEEDING WITH DOWNLOADING, INSTALLING, AND/OR USING THE SOFTWARE. YOU ARE NOT PERMITTED TO DOWNLOAD, INSTALL, AND/OR USE THE SOFTWARE UNTIL YOU HAVE AGREED TO BE BOUND BY ALL OF THE TERMS AND CONDITIONS OF THIS LICENSE AGREEMENT. YOU REPRESENT AND WARRANT THAT YOU ARE DULY AUTHORIZED TO ACCEPT THE TERMS AND CONDITIONS OF THIS LICENSE AGREEMENT ON BEHALF OF YOUR EMPLOYER.
Software made available through the Illumina Support Center is licensed, not sold, to you. Your license to each software program made available through the Illumina Support Center is subject to your prior acceptance of either this Illumina End-User Software License Agreement (“Agreement”), or a custom end user license agreement (“Custom EULA”), if one is provided with the software. Any software that is subject to this Agreement is referred to herein as the “Software.” By accepting this Agreement, you agree the terms and conditions of this Agreement will apply to and govern any and all of your downloads, installations, and uses of each Illumina software program made available through the Illumina Support Center, except that your download, installation, and use of any software provided with a Custom EULA will be governed by the terms and conditions of the Custom EULA.
This Agreement is made and entered into by and between Illumina, Inc., a Delaware corporation, having offices at 5200 Illumina Way, San Diego, CA 92122 (“Illumina”) and you as the end-user of the Software (hereinafter, “Licensee” or “you”). All software, firmware, and associated media, printed materials, and online and electronic documentation, including any updates or upgrades thereof, made available through the Illumina Support Center (collectively, “Software”) provided to Licensee are for use solely by Licensee and the provisions herein WILL apply with respect to such Software.
License Grant. Subject to the terms and conditions of this Agreement, Illumina grants to Licensee, under the following terms and conditions, a personal, non-exclusive, revocable, non-transferable, non-sublicensable license, for its internal end-use purposes only, in the ordinary course of Licensee's business to use the Software in executable object code form only, solely at the Licensee's facility to install and use the Software on a single computer accessible only by Licensee (and not on any public network or server), where the single computer is owned, leased, or otherwise substantially controlled by Licensee, for the purpose of processing and analyzing data generated from an Illumina genetic sequencing instrument owned and operated solely by Licensee (the “Product”). In the case of Software provided by Illumina in non-compiled form, Illumina grants Licensee a personal, non-exclusive, non-sublicenseable, restricted right to compile, install, and use one copy of the Software solely for processing and analyzing data generated from the Product.
License Restrictions. Except as expressly permitted in Section 1, Licensee may not make, have made, import, use, copy, reproduce, distribute, display, publish, sell, re-sell, lease, or sub-license the Software, in whole or in part, except as expressly provided for in this Agreement. Licensee may not modify, improve, translate, reverse engineer, decompile, disassemble, or create derivative works of the Software or otherwise attempt to (a) defeat, avoid, by-pass, remove, deactivate, or otherwise circumvent any software protection mechanisms in the Software including, without limitation, any such mechanism used to restrict or control the functionality of the Software, or (b) derive the source code or the underlying ideas, algorithms, structure, or organization form of the Software. Licensee will not allow, at any time, including during and after the term of the license, the Software or any portions or copies thereof in any form to become available to any third parties. Licensee may use the Software solely with genomic data that is generated using the Product; Licensee may not use the Software with any data generated from other products or instruments. Licensee may not use the Software to perform any data analysis services for any third party.
Ownership. The Software is protected by United States and international intellectual property laws. All right, title, and interest in and to the Software (including associated intellectual property rights) are and will remain vested in Illumina or Illumina's affiliated companies or licensors. Licensee acknowledges that no rights, license or interest to any Illumina trademarks are granted hereunder. Licensee acknowledges that unauthorized reproduction or distribution of the Software, or any portion of it, may result in severe civil and criminal penalties. Illumina reserves all rights in and to the Software not expressly granted to Licensee under this Agreement.
Upgrades/Updates. Illumina may, at its sole discretion, provide updates or upgrades to the Software. In that case, Licensee WILL have the same rights and obligations under such updates or upgrades as it has for the versions of the Software initially provided to Licensee hereunder. Licensee recognizes that Illumina is not obligated to provide any upgrades or updates to, or support for, the Software.
Data Integrity/Loss. Licensee is responsible for the integrity and availability, including preventing the loss of data that Licensee generates, uses, analyzes, manages, or stores in connection with or through its use of the Software, including without limitation, investigating and implementing industry appropriate policies and procedures regarding the provision of access to Licensee's data, monitoring access and use of Licensee's data, conducting routine backups and archiving of Licensee's data, and ensuring the adequacy of anti-virus software. Accordingly, Licensee agrees that Illumina is not responsible for any inability to access, loss or corruption of data as a result of Licensee's use of the Software, and Illumina has no liability to Licensee in connection with such inability to access, loss or corruption of data.
Term of License. This Agreement will be in effect from the time Licensee expressly accepts the terms and conditions of this license, or otherwise installs the Software, thereby accepting the terms and conditions contained herein, and will remain in effect until terminated. This license will otherwise terminate upon the conditions set forth in this Agreement, if revoked by Illumina, or if Licensee fails to comply with any term or condition of this Agreement including failure to pay any applicable license fee. Licensee agrees upon termination of this Agreement for any reason to immediately discontinue use of and un-install the Software and destroy all copies of the Software in its possession and/or under its control, and return or destroy, at Illumina's option, any compact disks, floppy disks or other media provided by Illumina storing the Software thereon (together with any authorized copies thereof), as well as any documentation associated therewith.
Limited Warranty. Illumina warrants that, for a period of 6 months from the date of download or installation of the Software by Licensee, the Software will perform in all material respects in accordance with the accompanying documentation available on the Illumina Support Center. EXCEPT AND TO THE EXTENT EXPRESSLY PROVIDED IN THE FOREGOING, AND TO THE FULLEST EXTENT PERMITTED BY APPLICABLE LAW, THE SOFTWARE IS PROVIDED “AS IS” AND ILLUMINA EXPRESSLY DISCLAIMS ALL WARRANTIES AND CONDITIONS REGARDING THE SOFTWARE AND RESULTS GENERATED BY THE SOFTWARE, INCLUDING WITHOUT LIMITATION, TO THE FULLEST EXTENT PERMITTED BY APPLICABLE LAW, ALL OTHER EXPRESS OR IMPLIED WARRANTIES OR CONDITIONS OF MERCHANTABLE QUALITY, NON-INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE, AND THOSE ARISING BY STATUTE OR OTHERWISE IN LAW OR FROM A COURSE OF DEALING OR USAGE OF TRADE. ILLUMINA DOES NOT WARRANT THAT THE FUNCTIONS CONTAINED IN THE SOFTWARE WILL MEET LICENSEE'S REQUIREMENTS, OR THAT THE OPERATION OF THE SOFTWARE WILL BE ERROR FREE OR UNINTERRUPTED.
Limitation of Liability.
(a) ILLUMINA'S ENTIRE LIABILITY AND LICENSEE'S EXCLUSIVE REMEDY UNDER THE LIMITED WARRANTY PROVISION OF SECTION 7 ABOVE WILL BE, AT ILLUMINA'S OPTION, EITHER (i) RETURN OF THE PRICE PAID FOR THE SOFTWARE, OR (ii) REPAIR OR REPLACEMENT OF THE PORTIONS OF THE SOFTWARE THAT DO NOT COMPLY WITH ILLUMINA'S LIMITED WARRANTY. THIS LIMITED WARRANTY IS VOID AND ILLUMINA WILL HAVE NO LIABILITY AT ALL IF FAILURE OF THE SOFTWARE TO COMPLY WITH ILLUMINA'S LIMITED WARRANTY HAS RESULTED FROM: (w) FAILURE TO USE THE SOFTWARE IN ACCORDANCE WITH ILLUMINA'S THEN CURRENT USER MANUAL OR THIS AGREEMENT; (x) ACCIDENT, ABUSE, OR MISAPPLICATION; (y) PRODUCTS OR EQUIPMENT NOT SPECIFIED BY ILLUMINA AS BEING COMPATIBLE WITH THE SOFTWARE; OR (z) IF LICENSEE HAS NOT NOTIFIED ILLUMINA IN WRITING OF THE DEFECT WITHIN THE ABOVE WARRANTY PERIOD.
(b) TO THE FULLEST EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT WILL ILLUMINA BE LIABLE UNDER ANY THEORY OF CONTRACT, TORT, STRICT LIABILITY OR OTHER LEGAL OR EQUITABLE THEORY FOR ANY PERSONAL INJURY OR ANY INDIRECT, CONSEQUENTIAL, OR INCIDENTAL DAMAGES, EVEN IF ILLUMINA HAS BEEN ADVISED OF THE POSSIBILITY THEREOF INCLUDING, WITHOUT LIMITATION, LOST PROFITS, LOST DATA, INTERRUPTION OF BUSINESS, LOST BUSINESS REVENUE, OTHER ECONOMIC LOSS, OR ANY LOSS OF RECORDED DATA ARISING OUT OF THE USE OF OR INABILITY TO USE THE SOFTWARE. EXCEPT AND TO THE EXTENT EXPRESSLY PROVIDED IN SECTION 7 AND 8(a) ABOVE OR AS OTHERWISE PERMITTED BY LAW, IN NO EVENT WILL ILLUMINA'S TOTAL LIABILITY TO LICENSEE FOR ALL DAMAGES (OTHER THAN AS MAY BE REQUIRED BY APPLICABLE LAW IN CASES INVOLVING PERSONAL INJURY) EXCEED THE AMOUNT OF $500 USD. THE FOREGOING LIMITATIONS WILL APPLY EVEN IF THE ABOVE STATED REMEDY FAILS OF ITS ESSENTIAL PURPOSE.
Survival. The limitations of liability and ownership rights of Illumina contained herein and Licensee's obligations following termination of this Agreement WILL survive the termination of this Agreement for any reason.
Research Use Only. The Software is labeled with a For Research Use Only or similar labeling statement and the performance characteristics of the Software have not been established and the Software is not for use in diagnostic procedures. Licensee acknowledges and agrees that (i) the Software has not been approved, cleared, or licensed by the United States Food and Drug Administration or any other regulatory entity whether foreign or domestic for any specific intended use, whether research, commercial, diagnostic, or otherwise, and (ii) Licensee must ensure it has any regulatory approvals that are necessary for Licensee's intended uses of the Software. Licensee will comply with all applicable laws and regulations when using and maintaining the Software.
General. Licensee may not sublicense, assign, share, pledge, rent or transfer any of its rights under this Agreement in relation to the Software or any portion thereof including documentation. Illumina reserves the right to change this Agreement at any time. When Illumina makes any changes, Illumina will provide the updated Agreement, or a link to it, on Illumina's website (www.illumina.com) and such updated Agreement WILL become effective immediately. Licensee's continued access to or use of the Software represents Licensee's agreement to any revised Agreement. If one or more provisions of this Agreement are found to be invalid or unenforceable, this Agreement WILL not be rendered inoperative but the remaining provisions WILL continue in full force and effect. This Agreement constitutes the entire agreement between the parties with respect to the subject matter of this Agreement and merges all prior communications except that a “hard-copy” form of licensing agreement relating to the Software previously agreed to in writing by Illumina and Licensee WILL supersede and govern in the event of any conflicting provisions.
Governing Law. This Agreement WILL be governed by and construed in accordance with the laws of the state of California, USA, without regard to its conflicts of laws principles, and independent of where a suit or action hereunder may be filed.
U.S. Government End Users. If Licensee is a branch agency or instrumentality of the United States Government, the following provision applies. The Software is a “commercial item” as that term is defined at 48 C.F.R. 2.101, consisting of “commercial computer software” and “commercial computer software documentation,” as such terms are used in 48 C.F.R. 12.212 or 48 C.F.R. 227.7202 (as applicable). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4, all United States Government end users acquire the Software with only those rights set forth herein.
Contact. Any questions regarding legal rights, duties, obligations, or restrictions associated with the software hereunder should be directed to Illumina, Inc., 5200 Illumina Way, San Diego, CA 92122, Attention: Legal Department, Phone: (858) 202-4500, Fax: (858) 202-4599, web site: www.illumina.com <http://www.illumina.com>.
Third Party Components. The Software may include third party software (“Third Party Programs”). Some of the Third Party Programs are available under open source or free software licenses. The License Agreement accompanying the Licensed Software does not alter any rights or obligations Licensee may have under those open source or free software licenses. The licenses that govern the terms and conditions of use of the Third Party Programs included in the Licensed Software are provided in the READ ME provided with the Software. The READ ME also contains copyright statements for the various open source software components (or portions thereof) that are distributed with the Licensed Software.
END OF END-USER SOFTWARE LICENSE AGREEMENT.

modules/bclconvert/README.md
@@ -0,0 +1,17 @@
# Updating the docker container and making a new module release
bcl-convert is a commercial tool from Illumina. The container provided for the bcl-convert nf-core module is neither provided nor supported by Illumina. Updating the bcl-convert version in the container and pushing the update to Dockerhub needs to be done manually.
1. Navigate to the appropriate download page, [BCL Convert](https://support.illumina.com/sequencing/sequencing_software/bcl-convert/downloads.html), and download the RPM of the desired bcl-convert version with `curl` or `wget`.
2. Unpack the RPM package using `rpm2cpio bcl-convert-*.rpm | cpio -i --make-directories`. Place the executable located in `<unpack_dir>/usr/bin/bcl-convert` in the same folder as the Dockerfile.
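For instance, assuming the RPM has been downloaded next to the Dockerfile (file name illustrative):

```bash
rpm2cpio bcl-convert-*.rpm | cpio -i --make-directories
cp ./usr/bin/bcl-convert .
```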
3. Create and test the container:
```bash
docker build . -t nfcore/bclconvert:<VERSION>
```
4. Access rights are needed to push the container to the Dockerhub nfcore organization; please ask a core team member to do so.
```bash
docker push nfcore/bclconvert:<VERSION>
```

modules/bclconvert/main.nf
@@ -0,0 +1,81 @@
process BCLCONVERT {
tag "$samplesheet"
label 'process_high'
if (params.enable_conda) {
exit 1, "Conda environments cannot be used when using bcl-convert. Please use docker or singularity containers."
}
container "nfcore/bclconvert:3.9.3"
input:
path samplesheet
path run_dir
output:
path "*.fastq.gz" ,emit: fastq
path "Reports/*.{csv,xml,bin}" ,emit: reports
path "Logs/*.{log,txt}" ,emit: logs
path "InterOp/*.bin" ,emit: interop
path "versions.yml" ,emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
"""
bcl-convert \\
$args \\
--output-directory . \\
--bcl-input-directory ${run_dir} \\
--sample-sheet ${samplesheet} \\
--bcl-num-parallel-tiles ${task.cpus}
mkdir InterOp
cp ${run_dir}/InterOp/*.bin InterOp/
mv Reports/*.bin InterOp/
cat <<-END_VERSIONS > versions.yml
"${task.process}":
bclconvert: \$(bcl-convert -V 2>&1 | head -n 1 | sed 's/^.*Version //')
END_VERSIONS
"""
stub:
"""
echo "sample1_S1_L001_R1_001" > sample1_S1_L001_R1_001.fastq.gz
echo "sample1_S1_L001_R2_001" > sample1_S1_L001_R2_001.fastq.gz
echo "sample1_S1_L002_R1_001" > sample1_S1_L002_R1_001.fastq.gz
echo "sample1_S1_L002_R2_001" > sample1_S1_L002_R2_001.fastq.gz
echo "sample2_S2_L001_R1_001" > sample2_S2_L001_R1_001.fastq.gz
echo "sample2_S2_L001_R2_001" > sample2_S2_L001_R2_001.fastq.gz
echo "sample2_S2_L002_R1_001" > sample2_S2_L002_R1_001.fastq.gz
echo "sample2_S2_L002_R2_001" > sample2_S2_L002_R2_001.fastq.gz
mkdir Reports
echo "Adapter_Metrics" > Reports/Adapter_Metrics.csv
echo "Demultiplex_Stats" > Reports/Demultiplex_Stats.csv
echo "fastq_list" > Reports/fastq_list.csv
echo "Index_Hopping_Counts" > Reports/Index_Hopping_Counts.csv
echo "IndexMetricsOut" > Reports/IndexMetricsOut.bin
echo "Quality_Metrics" > Reports/Quality_Metrics.csv
echo "RunInfo" > Reports/RunInfo.xml
echo "SampleSheet" > Reports/SampleSheet.csv
echo "Top_Unknown_Barcodes" > Reports/Top_Unknown_Barcodes.csv
mkdir Logs
echo "Errors" > Logs/Errors.log
echo "FastqComplete" > Logs/FastqComplete.txt
echo "Info" > Logs/Info.log
echo "Warnings" > Logs/Warnings.log
mkdir InterOp/
echo "InterOp" > InterOp/InterOp.bin
cat <<-END_VERSIONS > versions.yml
"${task.process}":
bclconvert: \$(bcl-convert -V 2>&1 | head -n 1 | sed 's/^.*Version //')
END_VERSIONS
"""
}

modules/bclconvert/meta.yml
@@ -0,0 +1,45 @@
name: "bclconvert"
description: Demultiplex Illumina BCL files
keywords:
- demultiplex
- illumina
- fastq
tools:
- "bclconvert":
description: "Demultiplex Illumina BCL files"
homepage: "https://support.illumina.com/sequencing/sequencing_software/bcl-convert.html"
documentation: "https://support-docs.illumina.com/SW/BCL_Convert/Content/SW/FrontPages/BCL_Convert.htm"
licence: "ILLUMINA"
input:
- samplesheet:
type: file
description: "Input samplesheet"
pattern: "*.{csv}"
- run_dir:
type: directory
description: "Input run directory containing RunInfo.xml and BCL data"
output:
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- fastq:
type: file
description: Demultiplexed FASTQ files
pattern: "*.{fastq.gz}"
- reports:
type: file
description: Demultiplexing Reports
pattern: "Reports/*.{csv,xml}"
- logs:
type: file
description: Log files
pattern: "Logs/*.{log,txt}"
- interop:
type: file
description: Interop files
pattern: "Interop/*.{bin}"
authors:
- "@matthdsm"

modules/elprep/split/main.nf
@@ -0,0 +1,44 @@
process ELPREP_SPLIT {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::elprep=5.1.2" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/elprep:5.1.2--he881be0_0':
'quay.io/biocontainers/elprep:5.1.2--he881be0_0' }"
input:
tuple val(meta), path(bam)
output:
tuple val(meta), path("**.{bam,sam}"), emit: bam
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
if (meta.single_end) args += " --single-end"
"""
# create directory and move all input so elprep can find and merge them before splitting
mkdir input
mv ${bam} input/
mkdir ${prefix}
elprep split \\
input \\
. \\
$args \\
--nr-of-threads $task.cpus \\
--output-prefix $prefix
cat <<-END_VERSIONS > versions.yml
"${task.process}":
elprep: \$(elprep 2>&1 | head -n2 | tail -n1 |sed 's/^.*version //;s/ compiled.*\$//')
END_VERSIONS
"""
}

modules/elprep/split/meta.yml
@@ -0,0 +1,43 @@
name: "elprep_split"
description: Split bam file into manageable chunks
keywords:
- bam
- split by chromosome
tools:
- "elprep":
description: "elPrep is a high-performance tool for preparing .sam/.bam files for variant calling in sequencing pipelines. It can be used as a drop-in replacement for SAMtools/Picard/GATK4."
homepage: "https://github.com/ExaScience/elprep"
documentation: "https://github.com/ExaScience/elprep"
tool_dev_url: "https://github.com/ExaScience/elprep"
doi: "10.1371"
licence: "['AGPL v3']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- bam:
type: file
description: List of BAM/SAM files
pattern: "*.{bam,sam}"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- bam:
type: file
description: List of split BAM/SAM files
pattern: "*.{bam,sam}"
authors:
- "@matthdsm"

modules/kraken2/kraken2/main.nf
@@ -10,11 +10,14 @@ process KRAKEN2_KRAKEN2 {
input:
tuple val(meta), path(reads)
path db
+ val save_output_fastqs
+ val save_reads_assignment
output:
- tuple val(meta), path('*classified*') , emit: classified
- tuple val(meta), path('*unclassified*'), emit: unclassified
- tuple val(meta), path('*report.txt') , emit: txt
+ tuple val(meta), path('*classified*') , optional:true, emit: classified_reads_fastq
+ tuple val(meta), path('*unclassified*') , optional:true, emit: unclassified_reads_fastq
+ tuple val(meta), path('*classifiedreads*'), optional:true, emit: classified_reads_assignment
+ tuple val(meta), path('*report.txt') , emit: report
path "versions.yml" , emit: versions
when:
@@ -26,19 +29,25 @@ process KRAKEN2_KRAKEN2 {
def paired = meta.single_end ? "" : "--paired"
def classified = meta.single_end ? "${prefix}.classified.fastq" : "${prefix}.classified#.fastq"
def unclassified = meta.single_end ? "${prefix}.unclassified.fastq" : "${prefix}.unclassified#.fastq"
+ def classified_command = save_output_fastqs ? "--classified-out ${classified}" : ""
+ def unclassified_command = save_output_fastqs ? "--unclassified-out ${unclassified}" : ""
+ def readclassification_command = save_reads_assignment ? "--output ${prefix}.kraken2.classifiedreads.txt" : ""
+ def compress_reads_command = save_output_fastqs ? "pigz -p $task.cpus *.fastq" : ""
"""
kraken2 \\
--db $db \\
--threads $task.cpus \\
- --unclassified-out $unclassified \\
- --classified-out $classified \\
--report ${prefix}.kraken2.report.txt \\
--gzip-compressed \\
+ $unclassified_command \\
+ $classified_command \\
+ $readclassification_command \\
$paired \\
$args \\
$reads
- pigz -p $task.cpus *.fastq
+ $compress_reads_command
cat <<-END_VERSIONS > versions.yml
"${task.process}":

modules/kraken2/kraken2/meta.yml
@@ -27,25 +27,40 @@ input:
- db:
type: directory
description: Kraken2 database
+ - save_output_fastqs:
+ type: boolean
+ description: |
+ If true, optional commands are added to save classified and unclassified reads
+ as fastq files
+ - save_reads_assignment:
+ type: boolean
+ description: |
+ If true, an optional command is added to save a file reporting the taxonomic
+ classification of each input read
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- - classified:
+ - classified_reads_fastq:
type: file
description: |
- Reads classified to belong to any of the taxa
+ Reads classified as belonging to any of the taxa
on the Kraken2 database.
pattern: "*{fastq.gz}"
- - unclassified:
+ - unclassified_reads_fastq:
type: file
description: |
- Reads not classified to belong to any of the taxa
+ Reads not classified to any of the taxa
on the Kraken2 database.
pattern: "*{fastq.gz}"
- - txt:
+ - classified_reads_assignment:
+ type: file
+ description: |
+ Kraken2 output file indicating the taxonomic assignment of
+ each input read
+ - report:
type: file
description: |
Kraken2 report containing stats about classified

modules/picard/collecthsmetrics/main.nf
@@ -15,7 +15,7 @@ process PICARD_COLLECTHSMETRICS {
path target_intervals
output:
tuple val(meta), path("*collecthsmetrics.txt"), emit: hs_metrics
tuple val(meta), path("*_metrics") , emit: metrics
path "versions.yml" , emit: versions
when:
@@ -41,7 +41,8 @@ process PICARD_COLLECTHSMETRICS {
-BAIT_INTERVALS $bait_intervals \\
-TARGET_INTERVALS $target_intervals \\
-INPUT $bam \\
- -OUTPUT ${prefix}_collecthsmetrics.txt
+ -OUTPUT ${prefix}.CollectHsMetrics.coverage_metrics
cat <<-END_VERSIONS > versions.yml
"${task.process}":
@@ -52,7 +53,7 @@ process PICARD_COLLECTHSMETRICS {
stub:
def prefix = task.ext.prefix ?: "${meta.id}"
"""
- touch ${prefix}_collecthsmetrics.txt
+ touch ${prefix}.CollectHsMetrics.coverage_metrics
cat <<-END_VERSIONS > versions.yml
"${task.process}":

modules/picard/collecthsmetrics/meta.yml
@@ -57,10 +57,11 @@ output:
type: file
description: File containing software versions
pattern: "versions.yml"
- - hs_metrics:
+ - metrics:
type: file
- description: The metrics file.
- pattern: "*_collecthsmetrics.txt"
+ description: Alignment metrics files generated by picard
+ pattern: "*_{metrics}"
authors:
- "@projectoriented"
- "@matthdsm"

modules/samtools/collatefastq/main.nf
@@ -0,0 +1,47 @@
process SAMTOOLS_COLLATEFASTQ {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::samtools=1.15.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/samtools:1.15.1--h1170115_0' :
'quay.io/biocontainers/samtools:1.15.1--h1170115_0' }"
input:
tuple val(meta), path(input)
output:
//TODO it might be good to have ordered output of the fastq files, so we
// can make sure that we get the right files
tuple val(meta), path("*_{1,2}.fq.gz"), path("*_other.fq.gz"), path("*_singleton.fq.gz"), emit: reads
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def args2 = task.ext.args2 ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
samtools collate \\
$args \\
--threads $task.cpus \\
-O \\
$input \\
. |
samtools fastq \\
$args2 \\
--threads $task.cpus \\
-1 ${prefix}_1.fq.gz \\
-2 ${prefix}_2.fq.gz \\
-0 ${prefix}_other.fq.gz \\
-s ${prefix}_singleton.fq.gz
cat <<-END_VERSIONS > versions.yml
"${task.process}":
samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
END_VERSIONS
"""
}

modules/samtools/collatefastq/meta.yml
@@ -0,0 +1,48 @@
name: samtools_collatefastq
description: |
The module uses collate and then fastq methods from samtools to
convert a SAM, BAM or CRAM file to FASTQ format
keywords:
- bam2fq
- samtools
- fastq
tools:
- samtools:
description: Tools for dealing with SAM, BAM and CRAM files
homepage: None
documentation: http://www.htslib.org/doc/1.1/samtools.html
tool_dev_url: None
doi: ""
licence: ["MIT"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- input:
type: file
description: BAM/CRAM/SAM file
pattern: "*.{bam,cram,sam}"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- reads:
type: file
description: |
FASTQ files, which will be either a group of 4 files (read_1, read_2, other and singleton)
or a single interleaved .fq.gz file if the user chooses not to split the reads.
pattern: "*.fq.gz"
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
authors:
- "@lescai"
- "@maxulysse"

modules/samtools/view/main.nf
@@ -8,7 +8,7 @@ process SAMTOOLS_VIEW {
'quay.io/biocontainers/samtools:1.15.1--h1170115_0' }"
input:
- tuple val(meta), path(input)
+ tuple val(meta), path(input), path(index)
path fasta
output:

modules/samtools/view/meta.yml
@@ -25,6 +25,10 @@ input:
type: file
description: BAM/CRAM/SAM file
pattern: "*.{bam,cram,sam}"
- index:
type: optional file
description: BAM.BAI/CRAM.CRAI file
pattern: "*.{.bai,.crai}"
- fasta:
type: optional file
description: Reference file the CRAM was created with

modules/stranger/main.nf
@@ -33,4 +33,15 @@ process STRANGER {
stranger: \$( stranger --version )
END_VERSIONS
"""
stub:
def prefix = task.ext.prefix ?: "${meta.id}"
"""
touch ${prefix}.vcf.gz
cat <<-END_VERSIONS > versions.yml
"${task.process}":
stranger: \$( stranger --version )
END_VERSIONS
"""
}

modules/tabix/bgzip/main.nf
@@ -11,7 +11,7 @@ process TABIX_BGZIP {
tuple val(meta), path(input)
output:
tuple val(meta), path("*.gz"), emit: gz
tuple val(meta), path("${prefix}*"), emit: output
path "versions.yml" , emit: versions
when:
@@ -19,9 +19,12 @@ process TABIX_BGZIP {
script:
def args = task.ext.args ?: ''
- def prefix = task.ext.prefix ?: "${meta.id}"
+ prefix = task.ext.prefix ?: "${meta.id}"
+ in_bgzip = input.toString().endsWith(".gz")
+ command1 = in_bgzip ? '-d' : '-c'
+ command2 = in_bgzip ? '' : " > ${prefix}.${input.getExtension()}.gz"
"""
- bgzip -c $args $input > ${prefix}.${input.getExtension()}.gz
+ bgzip $command1 $args -@${task.cpus} $input $command2
cat <<-END_VERSIONS > versions.yml
"${task.process}":

modules/tabix/bgzip/meta.yml
@@ -1,13 +1,14 @@
name: tabix_bgzip
- description: Compresses files
+ description: Compresses/decompresses files
keywords:
- compress
+ - decompress
- bgzip
- tabix
tools:
- bgzip:
description: |
- Bgzip compresses files in a similar manner to, and compatible with, gzip.
+ Bgzip compresses or decompresses files in a similar manner to, and compatible with, gzip.
homepage: https://www.htslib.org/doc/tabix.html
documentation: http://www.htslib.org/doc/bgzip.html
doi: 10.1093/bioinformatics/btp352
@@ -18,19 +19,19 @@ input:
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- - file:
+ - input:
type: file
- description: text file
+ description: file to compress or to decompress
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- - file:
+ - output:
type: file
- description: Output compressed file
- pattern: "*.{gz}"
+ description: Output compressed/decompressed file
+ pattern: "*."
- versions:
type: file
description: File containing software versions

tests/config/pytest_modules.yml
@@ -26,6 +26,10 @@ allelecounter:
- modules/allelecounter/**
- tests/modules/allelecounter/**
amplify/predict:
- modules/amplify/predict/**
- tests/modules/amplify/predict/**
amps:
- modules/amps/**
- tests/modules/amps/**
@@ -38,6 +42,10 @@ amrfinderplus/update:
- modules/amrfinderplus/update/**
- tests/modules/amrfinderplus/update/**
antismash/antismashlitedownloaddatabases:
- modules/antismash/antismashlitedownloaddatabases/**
- tests/modules/antismash/antismashlitedownloaddatabases/**
arriba:
- modules/arriba/**
- tests/modules/arriba/**
@@ -166,6 +174,10 @@ bcftools/view:
- modules/bcftools/view/**
- tests/modules/bcftools/view/**
bclconvert:
- modules/bclconvert/**
- tests/modules/bclconvert/**
bedtools/bamtobed:
- modules/bedtools/bamtobed/**
- tests/modules/bedtools/bamtobed/**
@@ -591,6 +603,10 @@ elprep/merge:
- modules/elprep/merge/**
- tests/modules/elprep/merge/**
elprep/split:
- modules/elprep/split/**
- tests/modules/elprep/split/**
emmtyper:
- modules/emmtyper/**
- tests/modules/emmtyper/**
@@ -663,6 +679,10 @@ gatk4/applybqsr:
- modules/gatk4/applybqsr/**
- tests/modules/gatk4/applybqsr/**
gatk4/applybqsrspark:
- modules/gatk4/applybqsrspark/**
- tests/modules/gatk4/applybqsrspark/**
gatk4/applyvqsr:
- modules/gatk4/applyvqsr/**
- tests/modules/gatk4/applyvqsr/**
@@ -671,6 +691,10 @@ gatk4/baserecalibrator:
- modules/gatk4/baserecalibrator/**
- tests/modules/gatk4/baserecalibrator/**
gatk4/baserecalibratorspark:
- modules/gatk4/baserecalibratorspark/**
- tests/modules/gatk4/baserecalibratorspark/**
gatk4/bedtointervallist:
- modules/gatk4/bedtointervallist/**
- tests/modules/gatk4/bedtointervallist/**
@@ -747,6 +771,10 @@ gatk4/markduplicates:
- modules/gatk4/markduplicates/**
- tests/modules/gatk4/markduplicates/**
gatk4/markduplicatesspark:
- modules/gatk4/markduplicatesspark/**
- tests/modules/gatk4/markduplicatesspark/**
gatk4/mergebamalignment:
- modules/gatk4/mergebamalignment/**
- tests/modules/gatk4/mergebamalignment/**
@@ -1559,6 +1587,10 @@ samtools/bam2fq:
- modules/samtools/bam2fq/**
- tests/modules/samtools/bam2fq/**
samtools/collatefastq:
- modules/samtools/collatefastq/**
- tests/modules/samtools/collatefastq/**
samtools/depth:
- modules/samtools/depth/**
- tests/modules/samtools/depth/**

tests/modules/amplify/predict/main.nf
@@ -0,0 +1,18 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { PRODIGAL } from '../../../../modules/prodigal/main.nf' addParams( options: [:] )
include { AMPLIFY_PREDICT } from '../../../../modules/amplify/predict/main.nf' addParams( options: [:] )
workflow amplify_predict {
input = [
[ id:'test', single_end:false ], // meta map
file(params.test_data['sarscov2']['illumina']['contigs_fasta'], checkIfExists: true)
]
model_dir = []
PRODIGAL ( input, "gff" )
AMPLIFY_PREDICT ( PRODIGAL.out.amino_acid_fasta, model_dir)
}

tests/modules/amplify/predict/nextflow.config
@@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}

tests/modules/amplify/predict/test.yml
@@ -0,0 +1,9 @@
- name: amplify predict amplify_predict
command: nextflow run tests/modules/amplify/predict -entry amplify_predict -c tests/config/nextflow.config
tags:
- amplify/predict
- amplify
files:
- path: output/amplify/test.tsv
md5sum: 1951084ce1d410028be86754997e5852
- path: output/amplify/versions.yml

tests/modules/antismash/antismashlitedownloaddatabases/main.nf
@@ -0,0 +1,29 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { UNTAR as UNTAR1 } from '../../../../modules/untar/main.nf'
include { UNTAR as UNTAR2 } from '../../../../modules/untar/main.nf'
include { UNTAR as UNTAR3 } from '../../../../modules/untar/main.nf'
include { ANTISMASH_ANTISMASHLITEDOWNLOADDATABASES } from '../../../../modules/antismash/antismashlitedownloaddatabases/main.nf'
workflow test_antismash_antismashlitedownloaddatabases {
input1 = [
[],
file('https://github.com/nf-core/test-datasets/raw/modules/data/delete_me/antismash/css.tar.gz', checkIfExists: true)
]
input2 = [
[],
file('https://github.com/nf-core/test-datasets/raw/modules/data/delete_me/antismash/detection.tar.gz', checkIfExists: true)
]
input3 = [
[],
file('https://github.com/nf-core/test-datasets/raw/modules/data/delete_me/antismash/modules.tar.gz', checkIfExists: true)
]
UNTAR1 ( input1 )
UNTAR2 ( input2 )
UNTAR3 ( input3 )
ANTISMASH_ANTISMASHLITEDOWNLOADDATABASES ( UNTAR1.out.untar.map{ it[1] }, UNTAR2.out.untar.map{ it[1] }, UNTAR3.out.untar.map{ it[1] } )
}

tests/modules/antismash/antismashlitedownloaddatabases/nextflow.config
@@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}

tests/modules/antismash/antismashlitedownloaddatabases/test.yml
@@ -0,0 +1,14 @@
- name: antismash antismashlitedownloaddatabases test_antismash_antismashlitedownloaddatabases
command: nextflow run tests/modules/antismash/antismashlitedownloaddatabases -entry test_antismash_antismashlitedownloaddatabases -c tests/config/nextflow.config
tags:
- antismash/antismashlitedownloaddatabases
- antismash
files:
- path: output/antismash/versions.yml
md5sum: e2656c8d2bcc7469eba40eb1ee5c91b3
- path: output/antismash/antismash_db
- path: output/antismash/antismash_db/clusterblast
- path: output/antismash/antismash_db/clustercompare
- path: output/antismash/antismash_db/pfam
- path: output/antismash/antismash_db/resfam
- path: output/antismash/antismash_db/tigrfam

tests/modules/bclconvert/main.nf
@@ -0,0 +1,22 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { BCLCONVERT } from '../../../modules/bclconvert/main.nf'
process STUB_BCLCONVERT_INPUT {
output:
path "SampleSheet.csv" ,emit: samplesheet
path "DDMMYY_SERIAL_FLOWCELL" ,emit: run_dir
stub:
"""
mkdir DDMMYY_SERIAL_FLOWCELL
echo "SampleSheet" > SampleSheet.csv
"""
}
workflow test_bclconvert {
STUB_BCLCONVERT_INPUT ()
BCLCONVERT (STUB_BCLCONVERT_INPUT.out.samplesheet, STUB_BCLCONVERT_INPUT.out.run_dir)
}

tests/modules/bclconvert/nextflow.config
@@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}

tests/modules/bclconvert/test.yml
@@ -0,0 +1,52 @@
- name: bclconvert test_bclconvert
command: nextflow run tests/modules/bclconvert -entry test_bclconvert -c tests/config/nextflow.config -stub-run
tags:
- bclconvert
files:
- path: output/bclconvert/InterOp/InterOp.bin
md5sum: d3dea0bb4ab1c8754af324f40b001481
- path: output/bclconvert/Logs/Errors.log
md5sum: 334645f09074b2597a692e395b716a9c
- path: output/bclconvert/Logs/FastqComplete.txt
md5sum: a4c4c6ce2d0de67d3b7ac7d1fcb512e4
- path: output/bclconvert/Logs/Info.log
md5sum: d238822d379f2277cac950ca986cb660
- path: output/bclconvert/Logs/Warnings.log
md5sum: aeefd2d631817e170f88f25ecaaf4664
- path: output/bclconvert/Reports/Adapter_Metrics.csv
md5sum: af62e9c7b44940cfd8ea11064a1f42ae
- path: output/bclconvert/Reports/Demultiplex_Stats.csv
md5sum: d11313931fcaabb5ce159462ad3dd1da
- path: output/bclconvert/Reports/IndexMetricsOut.bin
md5sum: 6bcee11c8145e3b1059ceaa91d2f5be7
- path: output/bclconvert/Reports/Index_Hopping_Counts.csv
md5sum: 697e40e0c0d48b4bd25f138ef60b0bde
- path: output/bclconvert/Reports/Quality_Metrics.csv
md5sum: 3902fd38f6b01f1ce0f0e8724238f8f2
- path: output/bclconvert/Reports/RunInfo.xml
md5sum: 5bef7c7e76360231b0c4afdfc915fd44
- path: output/bclconvert/Reports/SampleSheet.csv
md5sum: c579e7d2c9c917c4cfb875a0373c0936
- path: output/bclconvert/Reports/Top_Unknown_Barcodes.csv
md5sum: 39a5e7f6d21c12d6051afdc8261b6330
- path: output/bclconvert/Reports/fastq_list.csv
md5sum: 32c51ab10e013fd547928de57361ffcb
- path: output/bclconvert/sample1_S1_L001_R1_001.fastq.gz
md5sum: 9b831a39755935333f86f167527a094d
- path: output/bclconvert/sample1_S1_L001_R2_001.fastq.gz
md5sum: 082f4f767b7619f409ca7e752ef482bf
- path: output/bclconvert/sample1_S1_L002_R1_001.fastq.gz
md5sum: 837764c89db93dfb53cd663c4f26f3d7
- path: output/bclconvert/sample1_S1_L002_R2_001.fastq.gz
md5sum: 1a42cf6ba0bb8fc7770f278e6d1ab676
- path: output/bclconvert/sample2_S2_L001_R1_001.fastq.gz
md5sum: 475bc426b7cc48d0551d40e31457dc78
- path: output/bclconvert/sample2_S2_L001_R2_001.fastq.gz
md5sum: f670ccd7d9352e0e67fe1c1232429d94
- path: output/bclconvert/sample2_S2_L002_R1_001.fastq.gz
md5sum: ebd5ff6fa5603e7d704b5a10598de58c
- path: output/bclconvert/sample2_S2_L002_R2_001.fastq.gz
md5sum: 2f83b460f52620d2548c7ef8845b31d7
- path: output/stub/SampleSheet.csv
md5sum: c579e7d2c9c917c4cfb875a0373c0936
- path: output/bclconvert/versions.yml

tests/modules/elprep/split/main.nf
@@ -0,0 +1,15 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { ELPREP_SPLIT } from '../../../../modules/elprep/split/main.nf'
workflow test_elprep_split {
input = [
[ id:'test', single_end:false ], // meta map
file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_bam'], checkIfExists: true)
]
ELPREP_SPLIT ( input )
}

tests/modules/elprep/split/nextflow.config
@@ -0,0 +1,9 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
withName: ELPREP_SPLIT {
ext.args = "--contig-group-size 1 --output-type bam"
}
}

tests/modules/elprep/split/test.yml
@@ -0,0 +1,10 @@
- name: elprep split test_elprep_split
command: nextflow run tests/modules/elprep/split -entry test_elprep_split -c tests/config/nextflow.config
tags:
- elprep
- elprep/split
files:
- path: output/elprep/splits/test-group00001.bam
- path: output/elprep/splits/test-unmapped.bam
- path: output/elprep/test-spread.bam
- path: output/elprep/versions.yml

tests/modules/kraken2/kraken2/main.nf
@@ -12,7 +12,7 @@ workflow test_kraken2_kraken2_single_end {
db = [ [], file(params.test_data['sarscov2']['genome']['kraken2_tar_gz'], checkIfExists: true) ]
UNTAR ( db )
- KRAKEN2_KRAKEN2 ( input, UNTAR.out.untar.map{ it[1] } )
+ KRAKEN2_KRAKEN2 ( input, UNTAR.out.untar.map{ it[1] }, true, false )
}
workflow test_kraken2_kraken2_paired_end {
@@ -23,5 +23,15 @@ workflow test_kraken2_kraken2_paired_end {
db = [ [], file(params.test_data['sarscov2']['genome']['kraken2_tar_gz'], checkIfExists: true) ]
UNTAR ( db )
- KRAKEN2_KRAKEN2 ( input, UNTAR.out.untar.map{ it[1] } )
+ KRAKEN2_KRAKEN2 ( input, UNTAR.out.untar.map{ it[1] }, true, false )
}
+ workflow test_kraken2_kraken2_classifyreads {
+ input = [ [ id:'test', single_end:true ], // meta map
+ [ file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true) ]
+ ]
+ db = [ [], file(params.test_data['sarscov2']['genome']['kraken2_tar_gz'], checkIfExists: true) ]
+ UNTAR ( db )
+ KRAKEN2_KRAKEN2 ( input, UNTAR.out.untar.map{ it[1] }, false, true )
+ }
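For reference, a caller that wants both the classified/unclassified FASTQ files and the per-read classification would pass `true` for both flags; a hypothetical invocation (not exercised by these tests):

```nextflow
KRAKEN2_KRAKEN2 ( input, UNTAR.out.untar.map{ it[1] }, true, true )
```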

tests/modules/kraken2/kraken2/test.yml
@@ -1,29 +1,43 @@
- - name: kraken2 kraken2 single-end
- command: nextflow run ./tests/modules/kraken2/kraken2 -entry test_kraken2_kraken2_single_end -c ./tests/config/nextflow.config -c ./tests/modules/kraken2/kraken2/nextflow.config
+ - name: kraken2 kraken2 test_kraken2_kraken2_single_end
+ command: nextflow run tests/modules/kraken2/kraken2 -entry test_kraken2_kraken2_single_end -c tests/config/nextflow.config
tags:
- kraken2
- kraken2/kraken2
files:
- path: output/kraken2/test.classified.fastq.gz
- should_exist: true
- - path: output/kraken2/test.unclassified.fastq.gz
- should_exist: true
- path: output/kraken2/test.kraken2.report.txt
md5sum: 4227755fe40478b8d7dc8634b489761e
+ - path: output/kraken2/test.unclassified.fastq.gz
+ - path: output/kraken2/versions.yml
+ md5sum: 6e3ad947ac8dee841a89216071c181cc
+ - path: output/untar/versions.yml
- - name: kraken2 kraken2 paired-end
- command: nextflow run ./tests/modules/kraken2/kraken2 -entry test_kraken2_kraken2_paired_end -c ./tests/config/nextflow.config -c ./tests/modules/kraken2/kraken2/nextflow.config
+ - name: kraken2 kraken2 test_kraken2_kraken2_paired_end
+ command: nextflow run tests/modules/kraken2/kraken2 -entry test_kraken2_kraken2_paired_end -c tests/config/nextflow.config
tags:
- kraken2
- kraken2/kraken2
files:
- path: output/kraken2/test.classified_1.fastq.gz
- should_exist: true
- path: output/kraken2/test.classified_2.fastq.gz
- should_exist: true
- - path: output/kraken2/test.unclassified_1.fastq.gz
- should_exist: true
- - path: output/kraken2/test.unclassified_2.fastq.gz
- should_exist: true
- path: output/kraken2/test.kraken2.report.txt
md5sum: 4227755fe40478b8d7dc8634b489761e
+ - path: output/kraken2/test.unclassified_1.fastq.gz
+ - path: output/kraken2/test.unclassified_2.fastq.gz
+ - path: output/kraken2/versions.yml
+ md5sum: 604482fe7a4519f890fae9c8beb1bd6e
+ - path: output/untar/versions.yml
+ - name: kraken2 kraken2 test_kraken2_kraken2_classifyreads
+ command: nextflow run tests/modules/kraken2/kraken2 -entry test_kraken2_kraken2_classifyreads -c tests/config/nextflow.config
+ tags:
+ - kraken2
+ - kraken2/kraken2
+ files:
+ - path: output/kraken2/test.kraken2.classifiedreads.txt
+ md5sum: e7a90531f0d8d777316515c36fe4cae0
+ - path: output/kraken2/test.kraken2.report.txt
+ md5sum: 4227755fe40478b8d7dc8634b489761e
+ - path: output/kraken2/versions.yml
+ md5sum: 3488c304259e83c5bea573403293fce9
+ - path: output/untar/versions.yml

tests/modules/picard/collecthsmetrics/test.yml
@@ -5,4 +5,4 @@
- picard/collecthsmetrics
files:
# The file can't be md5'd consistently
- - path: output/picard/test_collecthsmetrics.txt
+ - path: output/picard/test.CollectHsMetrics.coverage_metrics

tests/modules/samtools/collatefastq/main.nf
@@ -0,0 +1,13 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { SAMTOOLS_COLLATEFASTQ } from '../../../../modules/samtools/collatefastq/main.nf'
workflow test_samtools_collatefastq {
input = [ [ id:'test', single_end:false ], // meta map
file(params.test_data['sarscov2']['illumina']['test_paired_end_bam'], checkIfExists: true)
]
SAMTOOLS_COLLATEFASTQ ( input )
}

tests/modules/samtools/collatefastq/nextflow.config
@@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}

tests/modules/samtools/collatefastq/test.yml
@@ -0,0 +1,14 @@
- name: samtools fastq test_samtools_collatefastq
command: nextflow run ./tests/modules/samtools/collatefastq -entry test_samtools_collatefastq -c ./tests/config/nextflow.config -c ./tests/modules/samtools/collatefastq/nextflow.config
tags:
- samtools
- samtools/collatefastq
files:
- path: output/samtools/test_1.fq.gz
md5sum: 829732de4e937edca90f27b07e5b501a
- path: output/samtools/test_2.fq.gz
md5sum: ef27d3809e495620fd93df894280c03a
- path: output/samtools/test_other.fq.gz
md5sum: 709872fc2910431b1e8b7074bfe38c67
- path: output/samtools/test_singleton.fq.gz
md5sum: 709872fc2910431b1e8b7074bfe38c67

tests/modules/samtools/view/main.nf
@@ -6,7 +6,8 @@ include { SAMTOOLS_VIEW } from '../../../../modules/samtools/view/main.nf'
workflow test_samtools_view {
input = [ [ id:'test', single_end:false ], // meta map
- file(params.test_data['sarscov2']['illumina']['test_paired_end_bam'], checkIfExists: true)
+ file(params.test_data['sarscov2']['illumina']['test_paired_end_bam'], checkIfExists: true),
+ []
]
SAMTOOLS_VIEW ( input, [] )

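Since the module's input tuple now carries an optional index, a caller can also supply one alongside the alignment file. A hypothetical variant (the test-data keys are assumed to exist in tests/config/test_data.config):

```nextflow
input = [ [ id:'test', single_end:false ], // meta map
          file(params.test_data['sarscov2']['illumina']['test_paired_end_sorted_bam'], checkIfExists: true),
          file(params.test_data['sarscov2']['illumina']['test_paired_end_sorted_bam_bai'], checkIfExists: true)
        ]
SAMTOOLS_VIEW ( input, [] )
```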
tests/modules/stranger/main.nf
@@ -23,3 +23,8 @@ workflow test_stranger_without_optional_variant_catalog {
EXPANSIONHUNTER ( input, fasta, variant_catalog )
STRANGER ( EXPANSIONHUNTER.out.vcf, [] )
}
workflow test_stranger_without_optional_variant_catalog_stubs {
EXPANSIONHUNTER ( input, fasta, variant_catalog )
STRANGER ( EXPANSIONHUNTER.out.vcf, [] )
}

tests/modules/stranger/test.yml
@@ -25,3 +25,13 @@
md5sum: bbe15159195681d5c18596d3ad85c78f
- path: output/stranger/versions.yml
md5sum: 8558542a007e90ea5dcdceed3f12585d
- name: stranger test_stranger_without_optional_variant_catalog_stubs
command: nextflow run tests/modules/stranger -entry test_stranger_without_optional_variant_catalog -c tests/config/nextflow.config -stub-run
tags:
- stranger
files:
- path: output/expansionhunter/test.vcf
- path: output/expansionhunter/versions.yml
- path: output/stranger/test.vcf.gz
- path: output/stranger/versions.yml

tests/modules/tabix/bgzip/main.nf
@@ -4,10 +4,18 @@ nextflow.enable.dsl = 2
include { TABIX_BGZIP } from '../../../../modules/tabix/bgzip/main.nf'
- workflow test_tabix_bgzip {
+ workflow test_tabix_bgzip_compress {
input = [ [ id:'test' ], // meta map
[ file(params.test_data['sarscov2']['illumina']['test_vcf'], checkIfExists: true) ]
]
TABIX_BGZIP ( input )
}
+ workflow test_tabix_bgzip_decompress {
+ input = [ [ id:'test' ], // meta map
+ [ file(params.test_data['sarscov2']['genome']['test_bed_gz'], checkIfExists: true) ]
+ ]
+ TABIX_BGZIP ( input )
+ }

tests/modules/tabix/bgzip/test.yml
@@ -1,8 +1,16 @@
- - name: tabix bgzip
- command: nextflow run ./tests/modules/tabix/bgzip -entry test_tabix_bgzip -c ./tests/config/nextflow.config -c ./tests/modules/tabix/bgzip/nextflow.config
+ - name: tabix bgzip compress
+ command: nextflow run ./tests/modules/tabix/bgzip -entry test_tabix_bgzip_compress -c ./tests/config/nextflow.config -c ./tests/modules/tabix/bgzip/nextflow.config
tags:
- tabix
- tabix/bgzip
files:
- path: ./output/tabix/test.vcf.gz
md5sum: fc178eb342a91dc0d1d568601ad8f8e2
+ - name: tabix bgzip decompress
+ command: nextflow run ./tests/modules/tabix/bgzip -entry test_tabix_bgzip_decompress -c ./tests/config/nextflow.config -c ./tests/modules/tabix/bgzip/nextflow.config
+ tags:
+ - tabix
+ - tabix/bgzip
+ files:
+ - path: ./output/tabix/test.bed
+ md5sum: fe4053cf4de3aebbdfc3be2efb125a74