Merge branch 'nf-core:master' into motus_profile

This commit is contained in:
JIANHONG OU 2022-04-28 08:09:47 -04:00 committed by GitHub
commit 61799e80ab
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
98 changed files with 1822 additions and 141 deletions

View file

@ -0,0 +1,41 @@
def VERSION = '1.0.3' // Version information not provided by tool
process AMPLIFY_PREDICT {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::amplify=1.0.3" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/amplify:1.0.3--py36hdfd78af_0':
'quay.io/biocontainers/amplify:1.0.3--py36hdfd78af_0' }"
input:
tuple val(meta), path(faa)
path(model_dir)
output:
tuple val(meta), path('*.tsv'), emit: tsv
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def custom_model_dir = model_dir ? "-md ${model_dir}" : ""
"""
AMPlify \\
$args \\
${custom_model_dir} \\
-s '${faa}'
# rename output because the tool includes the date and time in the file name
mv *.tsv ${prefix}.tsv
cat <<-END_VERSIONS > versions.yml
"${task.process}":
AMPlify: $VERSION
END_VERSIONS
"""
}
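For orientation, a minimal calling sketch (not part of this commit); the include path, input files, and channel contents are hypothetical, and `[]` stands in for the optional model directory:

```nextflow
// Hypothetical test harness for AMPLIFY_PREDICT; all paths are assumptions.
include { AMPLIFY_PREDICT } from './modules/amplify/predict/main'

workflow {
    // one dummy [ meta, faa ] tuple
    faa_ch = Channel.of([ [ id:'test', single_end:false ], file('proteins.faa') ])
    AMPLIFY_PREDICT ( faa_ch, [] ) // [] = use the models bundled with AMPlify
    AMPLIFY_PREDICT.out.tsv.view()
}
```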

View file

@ -0,0 +1,47 @@
name: "amplify_predict"
description: AMPlify is an attentive deep learning model for antimicrobial peptide prediction.
keywords:
- antimicrobial peptides
- AMPs
- prediction
- model
tools:
- "amplify":
description: "Attentive deep learning model for antimicrobial peptide prediction"
homepage: "https://github.com/bcgsc/AMPlify"
documentation: "https://github.com/bcgsc/AMPlify"
tool_dev_url: "https://github.com/bcgsc/AMPlify"
doi: "https://doi.org/10.1186/s12864-022-08310-4"
licence: "['GPL v3']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- faa:
type: file
description: amino acid sequences fasta
pattern: "*.{fa,fa.gz,faa,faa.gz,fasta,fasta.gz}"
- model_dir:
type: directory
description: Directory of where models are stored (optional)
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- tsv:
type: file
description: amino acid sequences with prediction (AMP, non-AMP) and probability scores
pattern: "*.{tsv}"
authors:
- "@louperelo"

View file

@ -0,0 +1,50 @@
process ANTISMASH_ANTISMASHLITEDOWNLOADDATABASES {
label 'process_low'
conda (params.enable_conda ? "bioconda::antismash-lite=6.0.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/antismash-lite:6.0.1--pyhdfd78af_1' :
'quay.io/biocontainers/antismash-lite:6.0.1--pyhdfd78af_1' }"
/*
These files are normally downloaded/created by download-antismash-databases itself, and must be retrieved for input by manually running the command with conda or a standalone installation of antiSMASH. Therefore we do not recommend using this module for production pipelines, but rather require users to specify their own local copy of the antiSMASH database in pipelines. This is solely for use for CI tests of the nf-core/module version of antiSMASH.
Reason: Upon execution, the tool checks if certain database files are present within the container and if not, it tries to create them in /usr/local/bin, for which only root user has write permissions. Mounting those database files with this module prevents the tool from trying to create them.
These files are also emitted as output channels in this module to enable the antismash-lite module to use them as mount volumes to the docker/singularity containers.
*/
containerOptions {
workflow.containerEngine == 'singularity' ?
"-B $database_css:/usr/local/lib/python3.8/site-packages/antismash/outputs/html/css,$database_detection:/usr/local/lib/python3.8/site-packages/antismash/detection,$database_modules:/usr/local/lib/python3.8/site-packages/antismash/modules" :
workflow.containerEngine == 'docker' ?
"-v \$PWD/$database_css:/usr/local/lib/python3.8/site-packages/antismash/outputs/html/css -v \$PWD/$database_detection:/usr/local/lib/python3.8/site-packages/antismash/detection -v \$PWD/$database_modules:/usr/local/lib/python3.8/site-packages/antismash/modules" :
''
}
input:
path database_css
path database_detection
path database_modules
output:
path("antismash_db") , emit: database
path("css"), emit: css_dir
path("detection"), emit: detection_dir
path("modules"), emit: modules_dir
path "versions.yml", emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
"""
download-antismash-databases \\
--database-dir antismash_db \\
$args
cat <<-END_VERSIONS > versions.yml
"${task.process}":
antismash-lite: \$(antismash --version | sed 's/antiSMASH //')
END_VERSIONS
"""
}
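Given the mounting caveats in the comment above, here is a hedged sketch of how a pipeline could feed pre-fetched copies of the three directories into this module; the local paths are assumptions and must come from a conda or standalone antiSMASH installation:

```nextflow
// Sketch only: the css/detection/modules directories below are hypothetical
// local copies extracted beforehand, as the module comment explains.
include { ANTISMASH_ANTISMASHLITEDOWNLOADDATABASES } from './modules/antismash/antismashlitedownloaddatabases/main'

workflow {
    css       = file('antismash_setup/css')
    detection = file('antismash_setup/detection')
    modules   = file('antismash_setup/modules')
    ANTISMASH_ANTISMASHLITEDOWNLOADDATABASES ( css, detection, modules )
    ANTISMASH_ANTISMASHLITEDOWNLOADDATABASES.out.database.view()
}
```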

View file

@ -0,0 +1,70 @@
name: antismash_antismashlitedownloaddatabases
description: antiSMASH allows the rapid genome-wide identification, annotation and analysis of secondary metabolite biosynthesis gene clusters. This module downloads the antiSMASH databases.
keywords:
- secondary metabolites
- BGC
- biosynthetic gene cluster
- genome mining
- NRPS
- RiPP
- antibiotics
- prokaryotes
- bacteria
- eukaryotes
- fungi
- antismash
- database
tools:
- antismash:
description: antiSMASH - the antibiotics and Secondary Metabolite Analysis SHell
homepage: https://docs.antismash.secondarymetabolites.org
documentation: https://docs.antismash.secondarymetabolites.org
tool_dev_url: https://github.com/antismash/antismash
doi: "10.1093/nar/gkab335"
licence: ["AGPL v3"]
input:
- database_css:
type: directory
description: |
antismash/outputs/html/css folder which is being created during the antiSMASH database downloading step. These files are normally downloaded by download-antismash-databases itself, and must be retrieved by the user by manually running the command with conda or a standalone installation of antiSMASH. Therefore we do not recommend using this module for production pipelines, but rather require users to specify their own local copy of the antiSMASH database in pipelines.
pattern: "css"
- database_detection:
type: directory
description: |
antismash/detection folder which is being created during the antiSMASH database downloading step. These files are normally downloaded by download-antismash-databases itself, and must be retrieved by the user by manually running the command with conda or a standalone installation of antiSMASH. Therefore we do not recommend using this module for production pipelines, but rather require users to specify their own local copy of the antiSMASH database in pipelines.
pattern: "detection"
- database_modules:
type: directory
description: |
antismash/modules folder which is being created during the antiSMASH database downloading step. These files are normally downloaded by download-antismash-databases itself, and must be retrieved by the user by manually running the command with conda or a standalone installation of antiSMASH. Therefore we do not recommend using this module for production pipelines, but rather require users to specify their own local copy of the antiSMASH database in pipelines.
pattern: "modules"
output:
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- database:
type: directory
description: Download directory for antiSMASH databases
pattern: "antismash_db"
- css_dir:
type: directory
description: |
antismash/outputs/html/css folder which is being created during the antiSMASH database downloading step. These files are normally downloaded by download-antismash-databases itself, and must be retrieved by the user by manually running the command with conda or a standalone installation of antiSMASH. Therefore we do not recommend using this module for production pipelines, but rather require users to specify their own local copy of the antiSMASH database in pipelines.
pattern: "css"
- detection_dir:
type: directory
description: |
antismash/detection folder which is being created during the antiSMASH database downloading step. These files are normally downloaded by download-antismash-databases itself, and must be retrieved by the user by manually running the command with conda or a standalone installation of antiSMASH. Therefore we do not recommend using this module for production pipelines, but rather require users to specify their own local copy of the antiSMASH database in pipelines.
pattern: "detection"
- modules_dir:
type: directory
description: |
antismash/modules folder which is being created during the antiSMASH database downloading step. These files are normally downloaded by download-antismash-databases itself, and must be retrieved by the user by manually running the command with conda or a standalone installation of antiSMASH. Therefore we do not recommend using this module for production pipelines, but rather require users to specify their own local copy of the antiSMASH database in pipelines.
pattern: "modules"
authors:
- "@jasmezz"

modules/bclconvert/.gitignore
View file

@ -0,0 +1,2 @@
bcl-convert
*.rpm

View file

@ -0,0 +1,15 @@
# Dockerfile to create container with bcl-convert
# Push to nfcore/bclconvert:<VER>
FROM debian:bullseye-slim
LABEL authors="Matthias De Smet <matthias.desmet@ugent.be>" \
description="Docker image containing bcl-convert"
# Disclaimer: this container is not provided nor supported by Illumina
# 'ps' command is needed by some Nextflow executions to collect system stats
# Install procps and clean apt cache
RUN apt-get update \
&& apt-get install -y \
procps \
&& apt-get clean -y && rm -rf /var/lib/apt/lists/*
COPY bcl-convert /usr/local/bin/bcl-convert
RUN chmod +x /usr/local/bin/bcl-convert

View file

@ -0,0 +1,30 @@
ILLUMINA END-USER SOFTWARE LICENSE AGREEMENT
IMPORTANT-READ CAREFULLY. THIS IS A LICENSE AGREEMENT THAT YOU ARE REQUIRED TO ACCEPT BEFORE, DOWNLOADING, INSTALLING AND USING ANY SOFTWARE MADE AVAILABLE FROM THE ILLUMINA SUPPORT CENTER (https://support.illumina.com).
CAREFULLY READ ALL THE TERMS AND CONDITIONS OF THIS LICENSE AGREEMENT BEFORE PROCEEDING WITH DOWNLOADING, INSTALLING, AND/OR USING THE SOFTWARE. YOU ARE NOT PERMITTED TO DOWNLOAD, INSTALL, AND/OR USE THE SOFTWARE UNTIL YOU HAVE AGREED TO BE BOUND BY ALL OF THE TERMS AND CONDITIONS OF THIS LICENSE AGREEMENT. YOU REPRESENT AND WARRANT THAT YOU ARE DULY AUTHORIZED TO ACCEPT THE TERMS AND CONDITIONS OF THIS LICENSE AGREEMENT ON BEHALF OF YOUR EMPLOYER.
Software made available through the Illumina Support Center is licensed, not sold, to you. Your license to each software program made available through the Illumina Support Center is subject to your prior acceptance of either this Illumina End-User Software License Agreement (“Agreement”), or a custom end user license agreement (“Custom EULA”), if one is provided with the software. Any software that is subject to this Agreement is referred to herein as the “Software.” By accepting this Agreement, you agree the terms and conditions of this Agreement will apply to and govern any and all of your downloads, installations, and uses of each Illumina software program made available through the Illumina Support Center, except that your download, installation, and use of any software provided with a Custom EULA will be governed by the terms and conditions of the Custom EULA.
This Agreement is made and entered into by and between Illumina, Inc., a Delaware corporation, having offices at 5200 Illumina Way, San Diego, CA 92122 (“Illumina”) and you as the end-user of the Software (hereinafter, “Licensee” or “you”). All software, firmware, and associated media, printed materials, and online and electronic documentation, including any updates or upgrades thereof, made available through the Illumina Support Center (collectively, “Software”) provided to Licensee are for use solely by Licensee and the provisions herein WILL apply with respect to such Software.
License Grant. Subject to the terms and conditions of this Agreement, Illumina grants to Licensee, under the following terms and conditions, a personal, non-exclusive, revocable, non-transferable, non-sublicensable license, for its internal end-use purposes only, in the ordinary course of Licensees business to use the Software in executable object code form only, solely at the Licensees facility to, install and use the Software on a single computer accessible only by Licensee (and not on any public network or server), where the single computer is owned, leased, or otherwise substantially controlled by Licensee, for the purpose of processing and analyzing data generated from an Illumina genetic sequencing instrument owned and operated solely by Licensee (the “Product”). In the case of Software provided by Illumina in non-compiled form, Illumina grants Licensee a personal, non-exclusive, non-sublicenseable, restricted right to compile, install, and use one copy of the Software solely for processing and analyzing data generated from the Product.
License Restrictions. Except as expressly permitted in Section 1, Licensee may not make, have made, import, use, copy, reproduce, distribute, display, publish, sell, re-sell, lease, or sub-license the Software, in whole or in part, except as expressly provided for in this Agreement. Licensee may not modify, improve, translate, reverse engineer, decompile, disassemble, or create derivative works of the Software or otherwise attempt to (a) defeat, avoid, by-pass, remove, deactivate, or otherwise circumvent any software protection mechanisms in the Software including, without limitation, any such mechanism used to restrict or control the functionality of the Software, or (b) derive the source code or the underlying ideas, algorithms, structure, or organization form of the Software. Licensee will not allow, at any time, including during and after the term of the license, the Software or any portions or copies thereof in any form to become available to any third parties. Licensee may use the Software solely with genomic data that is generated using the Product; Licensee may not use the Software with any data generated from other products or instruments. Licensee may not use the Software to perform any data analysis services for any third party.
Ownership. The Software is protected by United States and international intellectual property laws. All right, title, and interest in and to the Software (including associated intellectual property rights) are and will remain vested in Illumina or Illuminas affiliated companies or licensors. Licensee acknowledges that no rights, license or interest to any Illumina trademarks are granted hereunder. Licensee acknowledges that unauthorized reproduction or distribution of the Software, or any portion of it, may result in severe civil and criminal penalties. Illumina reserves all rights in and to the Software not expressly granted to Licensee under this Agreement.
Upgrades/Updates. Illumina may, at its sole discretion, provide updates or upgrades to the Software. In that case, Licensee WILL have the same rights and obligations under such updates or upgrades as it has for the versions of the Software initially provided to Licensee hereunder. Licensee recognizes that Illumina is not obligated to provide any upgrades or updates to, or support for, the Software.
Data Integrity/Loss. Licensee is responsible for the integrity and availability, including preventing the loss of data that Licensee generates, uses, analyzes, manages, or stores in connection with or through its use of the Software, including without limitation, investigating and implementing industry appropriate policies and procedures regarding the provision of access to Licensees data, monitoring access and use of Licensees data, conducting routine backups and archiving of Licensees data, and ensuring the adequacy of anti-virus software. Accordingly, Licensee agrees that Illumina is not responsible for any inability to access, loss or corruption of data as a result of Licensees use of the Software, and Illumina has no liability to Licensee in connection with such inability to access, loss or corruption of data.
Term of License. This Agreement will be in effect from the time Licensee expressly accepts the terms and conditions of this license, or otherwise installs the Software, thereby accepting the terms and conditions contained herein, and will remain in effect until terminated. This license will otherwise terminate upon the conditions set forth in this Agreement, if revoked by Illumina, or if Licensee fails to comply with any term or condition of this Agreement including failure to pay any applicable license fee. Licensee agrees upon termination of this Agreement for any reason to immediately discontinue use of and un-install the Software and destroy all copies of the Software in its possession and/or under its control, and return or destroy, at Illuminas option, any compact disks, floppy disks or other media provided by Illumina storing the Software thereon (together with any authorized copies thereof), as well as any documentation associated therewith
Limited Warranty. Illumina warrants that, for a period of 6 months from the date of download or installation of the Software by Licensee, the Software will perform in all material respects in accordance with the accompanying documentation available on the Illumina Support Center. EXCEPT AND TO THE EXTENT EXPRESSLY PROVIDED IN THE FOREGOING, AND TO THE FULLEST EXTENT PERMITTED BY APPLICABLE LAW, THE SOFTWARE IS PROVIDED “AS IS” AND ILLUMINA EXPRESSLY DISCLAIMS ALL WARRANTIES AND CONDITIONS REGARDING THE SOFTWARE AND RESULTS GENERATED BY THE SOFTWARE, INCLUDING WITHOUT LIMITATION, TO THE FULLEST EXTENT PERMITTED BY APPLICABLE LAW, ALL OTHER EXPRESS OR IMPLIED WARRANTIES OR CONDITIONS OF MERCHANTABLE QUALITY, NON-INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE, AND THOSE ARISING BY STATUTE OR OTHERWISE IN LAW OR FROM A COURSE OF DEALING OR USAGE OF TRADE. ILLUMINA DOES NOT WARRANT THAT THE FUNCTIONS CONTAINED IN THE SOFTWARE WILL MEET LICENSEE'S REQUIREMENTS, OR THAT THE OPERATION OF THE SOFTWARE WILL BE ERROR FREE OR UNINTERRUPTED.
Limitation of Liability.
(a) ILLUMINAS ENTIRE LIABILITY AND LICENSEE'S EXCLUSIVE REMEDY UNDER THE LIMITED WARRANTY PROVISION OF SECTION 7 ABOVE WILL BE, AT ILLUMINAS OPTION, EITHER (i) RETURN OF THE PRICE PAID FOR THE SOFTWARE, OR (ii) REPAIR OR REPLACEMENT OF THE PORTIONS OF THE SOFTWARE THAT DO NOT COMPLY WITH ILLUMINAS LIMITED WARRANTY. THIS LIMITED WARRANTY IS VOID AND ILLUMINA WILL HAVE NO LIABILITY AT ALL IF FAILURE OF THE SOFTWARE TO COMPLY WITH ILLUMINA LIMITED WARRANTY HAS RESULTED FROM: (w) FAILURE TO USE THE SOFTWARE IN ACCORDANCE WITH ILLUMINAS THEN CURRENT USER MANUAL OR THIS AGREEMENT; (x) ACCIDENT, ABUSE, OR MISAPPLICATION; (y) PRODUCTS OR EQUIPMENT NOT SPECIFIED BY ILLUMINA AS BEING COMPATIBLE WITH THE SOFTWARE; OR (z) IF LICENSEE HAS NOT NOTIFIED ILLUMINA IN WRITING OF THE DEFECT WITHIN THE ABOVE WARRANTY PERIOD.
(b) TO THE FULLEST EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT WILL ILLUMINA BE LIABLE UNDER ANY THEORY OF CONTRACT, TORT, STRICT LIABILITY OR OTHER LEGAL OR EQUITABLE THEORY FOR ANY PERSONAL INJURY OR ANY INDIRECT, CONSEQUENTIAL, OR INCIDENTAL DAMAGES, EVEN IF ILLUMINA HAS BEEN ADVISED OF THE POSSIBILITY THEREOF INCLUDING, WITHOUT LIMITATION, LOST PROFITS, LOST DATA, INTERRUPTION OF BUSINESS, LOST BUSINESS REVENUE, OTHER ECONOMIC LOSS, OR ANY LOSS OF RECORDED DATA ARISING OUT OF THE USE OF OR INABILITY TO USE THE SOFTWARE. EXCEPT AND TO THE EXTENT EXPRESSLY PROVIDED IN SECTION 7 AND 8(a) ABOVE OR AS OTHERWISE PERMITTED BY LAW, IN NO EVENT WILL ILLUMINAS TOTAL LIABILITY TO LICENSEE FOR ALL DAMAGES (OTHER THAN AS MAY BE REQUIRED BY APPLICABLE LAW IN CASES INVOLVING PERSONAL INJURY) EXCEED THE AMOUNT OF $500 USD. THE FOREGOING LIMITATIONS WILL APPLY EVEN IF THE ABOVE STATED REMEDY FAILS OF ITS ESSENTIAL PURPOSE.
Survival. The limitations of liability and ownership rights of Illumina contained herein and Licensees obligations following termination of this Agreement WILL survive the termination of this Agreement for any reason.
Research Use Only. The Software is labeled with a For Research Use Only or similar labeling statement and the performance characteristics of the Software have not been established and the Software is not for use in diagnostic procedures. Licensee acknowledges and agrees that (i) the Software has not been approved, cleared, or licensed by the United States Food and Drug Administration or any other regulatory entity whether foreign or domestic for any specific intended use, whether research, commercial, diagnostic, or otherwise, and (ii) Licensee must ensure it has any regulatory approvals that are necessary for Licensees intended uses of the Software. Licensee will comply with all applicable laws and regulations when using and maintaining the Software.
General. Licensee may not sublicense, assign, share, pledge, rent or transfer any of its rights under this Agreement in relation to the Software or any portion thereof including documentation. Illumina reserves the right to change this Agreement at any time. When Illumina makes any changes, Illumina will provide the updated Agreement, or a link to it, on Illuminas website (www.illumina.com) and such updated Agreement WILL become effective immediately. Licensees continued access to or use of the Software represents Licensees agreement to any revised Agreement. If one or more provisions of this Agreement are found to be invalid or unenforceable, this Agreement WILL not be rendered inoperative but the remaining provisions WILL continue in full force and effect. This Agreement constitutes the entire agreement between the parties with respect to the subject matter of this Agreement and merges all prior communications except that a “hard-copy” form of licensing agreement relating to the Software previously agreed to in writing by Illumina and Licensee WILL supersede and govern in the event of any conflicting provisions.
Governing Law. This Agreement WILL be governed by and construed in accordance with the laws of the state of California, USA, without regard to its conflicts of laws principles, and independent of where a suit or action hereunder may be filed.
U.S. Government End Users. If Licensee is a branch agency or instrumentality of the United States Government, the following provision applies. The Software is a “commercial item” as that term is defined at 48 C.F.R. 2.101, consisting of “commercial computer software” and “commercial computer software documentation,” as such terms are used in 48 C.F.R. 12.212 or 48 C.F.R. 227.7202 (as applicable). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4, all United States Government end users acquire the Software with only those rights set forth herein.
Contact. Any questions regarding legal rights, duties, obligations, or restrictions associated with the software hereunder should be directed to Illumina, Inc., 5200 Illumina Way, San Diego, CA 92122, Attention: Legal Department, Phone: (858) 202-4500, Fax: (858) 202-4599, web site: www.illumina.com <http://www.illumina.com>.
Third Party Components. The Software may include third party software (“Third Party Programs”). Some of the Third Party Programs are available under open source or free software licenses. The License Agreement accompanying the Licensed Software does not alter any rights or obligations Licensee may have under those open source or free software licenses. The licenses that govern the terms and conditions of use of the Third Party Programs included in the Licensed Software are provided in the READ ME provided with the Software. The READ ME also contains copyright statements for the various open source software components (or portions thereof) that are distributed with the Licensed Software.
END OF END-USER SOFTWARE LICENSE AGREEMENT.

View file

@ -0,0 +1,17 @@
# Updating the docker container and making a new module release
bcl-convert is a commercial tool from Illumina. The container provided for the bcl-convert nf-core module is not provided nor supported by Illumina. Updating the bcl-convert version in the container and pushing the update to Dockerhub needs to be done manually.
1. Navigate to the [BCL Convert download page](https://support.illumina.com/sequencing/sequencing_software/bcl-convert/downloads.html) and download the RPM of the desired bcl-convert version with `curl` or `wget`.
2. Unpack the RPM package using `rpm2cpio bcl-convert-*.rpm | cpio -i --make-directories`. Place the executable located in `<unpack_dir>/usr/bin/bcl-convert` in the same folder where the Dockerfile lies.
3. Create and test the container:
```bash
docker build . -t nfcore/bclconvert:<VERSION>
```
4. Access rights are needed to push the container to the Dockerhub nfcore organization; please ask a core team member to do so.
```bash
docker push nfcore/bclconvert:<VERSION>
```

View file

@ -0,0 +1,81 @@
process BCLCONVERT {
tag "$samplesheet"
label 'process_high'
if (params.enable_conda) {
exit 1, "Conda environments cannot be used when using bcl-convert. Please use docker or singularity containers."
}
container "nfcore/bclconvert:3.9.3"
input:
path samplesheet
path run_dir
output:
path "*.fastq.gz" ,emit: fastq
path "Reports/*.{csv,xml,bin}" ,emit: reports
path "Logs/*.{log,txt}" ,emit: logs
path "InterOp/*.bin" ,emit: interop
path "versions.yml" ,emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
"""
bcl-convert \\
$args \\
--output-directory . \\
--bcl-input-directory ${run_dir} \\
--sample-sheet ${samplesheet} \\
--bcl-num-parallel-tiles ${task.cpus}
mkdir InterOp
cp ${run_dir}/InterOp/*.bin InterOp/
mv Reports/*.bin InterOp/
cat <<-END_VERSIONS > versions.yml
"${task.process}":
bclconvert: \$(bcl-convert -V 2>&1 | head -n 1 | sed 's/^.*Version //')
END_VERSIONS
"""
stub:
"""
echo "sample1_S1_L001_R1_001" > sample1_S1_L001_R1_001.fastq.gz
echo "sample1_S1_L001_R2_001" > sample1_S1_L001_R2_001.fastq.gz
echo "sample1_S1_L002_R1_001" > sample1_S1_L002_R1_001.fastq.gz
echo "sample1_S1_L002_R2_001" > sample1_S1_L002_R2_001.fastq.gz
echo "sample2_S2_L001_R1_001" > sample2_S2_L001_R1_001.fastq.gz
echo "sample2_S2_L001_R2_001" > sample2_S2_L001_R2_001.fastq.gz
echo "sample2_S2_L002_R1_001" > sample2_S2_L002_R1_001.fastq.gz
echo "sample2_S2_L002_R2_001" > sample2_S2_L002_R2_001.fastq.gz
mkdir Reports
echo "Adapter_Metrics" > Reports/Adapter_Metrics.csv
echo "Demultiplex_Stats" > Reports/Demultiplex_Stats.csv
echo "fastq_list" > Reports/fastq_list.csv
echo "Index_Hopping_Counts" > Reports/Index_Hopping_Counts.csv
echo "IndexMetricsOut" > Reports/IndexMetricsOut.bin
echo "Quality_Metrics" > Reports/Quality_Metrics.csv
echo "RunInfo" > Reports/RunInfo.xml
echo "SampleSheet" > Reports/SampleSheet.csv
echo "Top_Unknown_Barcodes" > Reports/Top_Unknown_Barcodes.csv
mkdir Logs
echo "Errors" > Logs/Errors.log
echo "FastqComplete" > Logs/FastqComplete.txt
echo "Info" > Logs/Info.log
echo "Warnings" > Logs/Warnings.log
mkdir InterOp/
echo "InterOp" > InterOp/InterOp.bin
cat <<-END_VERSIONS > versions.yml
"${task.process}":
bclconvert: \$(bcl-convert -V 2>&1 | head -n 1 | sed 's/^.*Version //')
END_VERSIONS
"""
}
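A hedged usage sketch (samplesheet and run directory names are assumptions; note the module aborts if `params.enable_conda` is set, so it must run with Docker or Singularity):

```nextflow
// Minimal sketch: bare samplesheet plus the Illumina run directory.
include { BCLCONVERT } from './modules/bclconvert/main'

workflow {
    BCLCONVERT ( file('SampleSheet.csv'), file('my_run_directory') )
    BCLCONVERT.out.fastq.view()
}
```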

View file

@ -0,0 +1,45 @@
name: "bclconvert"
description: Demultiplex Illumina BCL files
keywords:
- demultiplex
- illumina
- fastq
tools:
- "bclconvert":
description: "Demultiplex Illumina BCL files"
homepage: "https://support.illumina.com/sequencing/sequencing_software/bcl-convert.html"
documentation: "https://support-docs.illumina.com/SW/BCL_Convert/Content/SW/FrontPages/BCL_Convert.htm"
licence: "ILLUMINA"
input:
- samplesheet:
type: file
description: "Input samplesheet"
pattern: "*.{csv}"
- run_dir:
type: directory
description: "Input run directory containing RunInfo.xml and BCL data"
output:
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- fastq:
type: file
description: Demultiplexed FASTQ files
pattern: "*.{fastq.gz}"
- reports:
type: file
description: Demultiplexing Reports
pattern: "Reports/*.{csv,xml}"
- logs:
type: file
description: Log files
pattern: "Logs/*.{log,txt}"
- interop:
type: file
description: Interop files
pattern: "Interop/*.{bin}"
authors:
- "@matthdsm"

View file

@ -4,8 +4,8 @@ process CAT_FASTQ {
     conda (params.enable_conda ? "conda-forge::sed=4.7" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://containers.biocontainers.pro/s3/SingImgsRepo/biocontainers/v1.2.0_cv1/biocontainers_v1.2.0_cv1.img' :
-        'biocontainers/biocontainers:v1.2.0_cv1' }"
+        'https://depot.galaxyproject.org/singularity/ubuntu:20.04' :
+        'ubuntu:20.04' }"
     input:
     tuple val(meta), path(reads, stageAs: "input*/*")

View file

@ -2,10 +2,10 @@ process CUSTOM_GETCHROMSIZES {
     tag "$fasta"
     label 'process_low'
-    conda (params.enable_conda ? "bioconda::samtools=1.15" : null)
+    conda (params.enable_conda ? "bioconda::samtools=1.15.1" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/samtools:1.15--h1170115_1' :
-        'quay.io/biocontainers/samtools:1.15--h1170115_1' }"
+        'https://depot.galaxyproject.org/singularity/samtools:1.15.1--h1170115_0' :
+        'quay.io/biocontainers/samtools:1.15.1--h1170115_0' }"
     input:
     path fasta

View file

@ -0,0 +1,89 @@
process ELPREP_FILTER {
tag "$meta.id"
label 'process_high'
conda (params.enable_conda ? "bioconda::elprep=5.1.2" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/elprep:5.1.2--he881be0_0':
'quay.io/biocontainers/elprep:5.1.2--he881be0_0' }"
input:
tuple val(meta), path(bam)
val(run_haplotypecaller)
val(run_bqsr)
path(reference_sequences)
path(filter_regions_bed)
path(reference_elfasta)
path(known_sites_elsites)
path(target_regions_bed)
path(intermediate_bqsr_tables)
val(bqsr_tables_only)
val(get_activity_profile)
val(get_assembly_regions)
output:
tuple val(meta), path("output/**.{bam,sam}") ,emit: bam
tuple val(meta), path("*.metrics.txt") ,optional: true, emit: metrics
tuple val(meta), path("*.recall") ,optional: true, emit: recall
tuple val(meta), path("*.vcf.gz") ,optional: true, emit: gvcf
tuple val(meta), path("*.table") ,optional: true, emit: table
tuple val(meta), path("*.activity_profile.igv") ,optional: true, emit: activity_profile
tuple val(meta), path("*.assembly_regions.igv") ,optional: true, emit: assembly_regions
path "versions.yml" ,emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def suffix = args.contains("--output-type sam") ? "sam" : "bam"
// filter args
def reference_sequences_cmd = reference_sequences ? " --replace-reference-sequences ${reference_sequences}" : ""
def filter_regions_cmd = filter_regions_bed ? " --filter-non-overlapping-reads ${filter_regions_bed}" : ""
// markdup args
def markdup_cmd = args.contains("--mark-duplicates") ? " --mark-optical-duplicates ${prefix}.metrics.txt": ""
// variant calling args
def haplotyper_cmd = run_haplotypecaller ? " --haplotypecaller ${prefix}.g.vcf.gz": ""
def fasta_cmd = reference_elfasta ? " --reference ${reference_elfasta}": ""
def known_sites_cmd = known_sites_elsites ? " --known-sites ${known_sites_elsites}": ""
def target_regions_cmd = target_regions_bed ? " --target-regions ${target_regions_bed}": ""
// bqsr args
def bqsr_cmd = run_bqsr ? " --bqsr ${prefix}.recall": ""
def bqsr_tables_only_cmd = bqsr_tables_only ? " --bqsr-tables-only ${prefix}.table": ""
def intermediate_bqsr_cmd = intermediate_bqsr_tables ? " --bqsr-apply .": ""
// misc
def activity_profile_cmd = get_activity_profile ? " --activity-profile ${prefix}.activity_profile.igv": ""
def assembly_regions_cmd = get_assembly_regions ? " --assembly-regions ${prefix}.assembly_regions.igv": ""
"""
elprep filter ${bam} output/${prefix}.${suffix} \\
${reference_sequences_cmd} \\
${filter_regions_cmd} \\
${markdup_cmd} \\
${haplotyper_cmd} \\
${fasta_cmd} \\
${known_sites_cmd} \\
${target_regions_cmd} \\
${bqsr_cmd} \\
${bqsr_tables_only_cmd} \\
${intermediate_bqsr_cmd} \\
${activity_profile_cmd} \\
${assembly_regions_cmd} \\
--nr-of-threads ${task.cpus} \\
$args
cat <<-END_VERSIONS > versions.yml
"${task.process}":
elprep: \$(elprep 2>&1 | head -n2 | tail -n1 |sed 's/^.*version //;s/ compiled.*\$//')
END_VERSIONS
"""
}
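Because the process takes twelve positional inputs, a hedged wiring sketch may help; the file names are hypothetical and `[]` marks unused optional paths:

```nextflow
// Sketch of the full input signature of ELPREP_FILTER; paths are assumptions.
include { ELPREP_FILTER } from './modules/elprep/filter/main'

workflow {
    bam_ch = Channel.of([ [ id:'test', single_end:false ], file('test.bam') ])
    ELPREP_FILTER (
        bam_ch,
        true,                  // run_haplotypecaller
        true,                  // run_bqsr
        [],                    // reference_sequences (optional)
        [],                    // filter_regions_bed (optional)
        file('ref.elfasta'),   // reference_elfasta
        file('known.elsites'), // known_sites_elsites
        [],                    // target_regions_bed (optional)
        [],                    // intermediate_bqsr_tables (optional)
        false,                 // bqsr_tables_only
        false,                 // get_activity_profile
        false                  // get_assembly_regions
    )
}
```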

View file

@ -0,0 +1,106 @@
name: "elprep_filter"
description: "Filter, sort and markdup sam/bam files, with optional BQSR and variant calling."
keywords:
- sort
- bam
- sam
- filter
- variant calling
tools:
- "elprep":
description: "elPrep is a high-performance tool for preparing .sam/.bam files for variant calling in sequencing pipelines. It can be used as a drop-in replacement for SAMtools/Picard/GATK4."
homepage: "https://github.com/ExaScience/elprep"
documentation: "https://github.com/ExaScience/elprep"
tool_dev_url: "https://github.com/ExaScience/elprep"
doi: "10.1371/journal.pone.0244471"
licence: "['AGPL v3']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- bam:
type: file
description: Input SAM/BAM file
pattern: "*.{bam,sam}"
- run_haplotypecaller:
type: boolean
description: Run variant calling on the input files. Needed to generate gvcf output.
- run_bqsr:
type: boolean
description: Run BQSR on the input files. Needed to generate recall metrics.
- reference_sequences:
type: file
description: Optional SAM header to replace existing header.
pattern: "*.sam"
- filter_regions_bed:
type: file
description: Optional BED file containing regions to filter.
pattern: "*.bed"
- reference_elfasta:
type: file
description: Elfasta file, required for BQSR and variant calling.
pattern: "*.elfasta"
- known_sites_elsites:
type: file
description: Optional elsites file containing known SNPs for BQSR.
pattern: "*.elsites"
- target_regions_bed:
type: file
description: Optional BED file containing target regions for BQSR and variant calling.
pattern: "*.bed"
- intermediate_bqsr_tables:
type: file
description: Optional list of BQSR tables, used when parsing files created by `elprep split`
pattern: "*.table"
- bqsr_tables_only:
type: boolean
description: Write intermediate BQSR tables, used when parsing files created by `elprep split`.
- get_activity_profile:
type: boolean
description: Write the activity profile calculated by the haplotypecaller to the given file in IGV format.
- get_assembly_regions:
type: boolean
description: Write the assembly regions calculated by the haplotypecaller to the specified file in IGV format.
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- bam:
type: file
description: Sorted, markdup, optionally BQSR BAM/SAM file
pattern: "*.{bam,sam}"
- metrics:
type: file
description: Optional duplicate metrics file generated by elprep
pattern: "*.{metrics.txt}"
- recall:
type: file
description: Optional recall metrics file generated by elprep
pattern: "*.{recall}"
- gvcf:
type: file
description: Optional GVCF output file
pattern: "*.{vcf.gz}"
- table:
type: file
description: Optional intermediate BQSR table output file
pattern: "*.{table}"
- activity_profile:
type: file
description: Optional activity profile output file
pattern: "*.{activity_profile.igv}"
- assembly_regions:
type: file
description: Optional activity regions output file
pattern: "*.{assembly_regions.igv}"
authors:
- "@matthdsm"

View file

@ -0,0 +1,45 @@
process ELPREP_SPLIT {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::elprep=5.1.2" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/elprep:5.1.2--he881be0_0':
'quay.io/biocontainers/elprep:5.1.2--he881be0_0' }"
input:
tuple val(meta), path(bam)
output:
tuple val(meta), path("output/**.{bam,sam}"), emit: bam
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def single_end = meta.single_end ? " --single-end": ""
"""
# create directory and move all input so elprep can find and merge them before splitting
mkdir input
mv ${bam} input/
mkdir ${prefix}
elprep split \\
input \\
output/ \\
$args \\
$single_end \\
--nr-of-threads $task.cpus \\
--output-prefix $prefix
cat <<-END_VERSIONS > versions.yml
"${task.process}":
elprep: \$(elprep 2>&1 | head -n2 | tail -n1 |sed 's/^.*version //;s/ compiled.*\$//')
END_VERSIONS
"""
}
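A short hedged call sketch (the input BAM is hypothetical); a single `[ meta, bam ]` channel is the only input:

```nextflow
include { ELPREP_SPLIT } from './modules/elprep/split/main'

workflow {
    ELPREP_SPLIT ( Channel.of([ [ id:'test', single_end:false ], file('test.bam') ]) )
    ELPREP_SPLIT.out.bam.view() // per-sample tuple of split BAM chunks
}
```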

View file

@ -0,0 +1,43 @@
name: "elprep_split"
description: Split bam file into manageable chunks
keywords:
- bam
- split by chromosome
tools:
- "elprep":
description: "elPrep is a high-performance tool for preparing .sam/.bam files for variant calling in sequencing pipelines. It can be used as a drop-in replacement for SAMtools/Picard/GATK4."
homepage: "https://github.com/ExaScience/elprep"
documentation: "https://github.com/ExaScience/elprep"
tool_dev_url: "https://github.com/ExaScience/elprep"
doi: "10.1371"
licence: "['AGPL v3']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- bam:
type: file
description: List of BAM/SAM files
pattern: "*.{bam,sam}"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- bam:
type: file
description: List of split BAM/SAM files
pattern: "*.{bam,sam}"
authors:
- "@matthdsm"

modules/gamma/main.nf
View file

@ -0,0 +1,41 @@
def VERSION = '2.1' // Version information not provided by tool on CLI
process GAMMA {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::gamma=2.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/gamma%3A2.1--hdfd78af_0':
'quay.io/biocontainers/gamma:2.1--hdfd78af_0' }"
input:
tuple val(meta), path(fasta)
path(db)
output:
tuple val(meta), path("*.gamma") , emit: gamma
tuple val(meta), path("*.psl") , emit: psl
tuple val(meta), path("*.gff") , optional:true , emit: gff
tuple val(meta), path("*.fasta"), optional:true , emit: fasta
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
GAMMA.py \\
$args \\
$fasta \\
$db \\
$prefix
cat <<-END_VERSIONS > versions.yml
"${task.process}":
gamma: $VERSION
END_VERSIONS
"""
}
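A hedged usage sketch; the assembly and database files are assumptions:

```nextflow
include { GAMMA } from './modules/gamma/main'

workflow {
    fasta_ch = Channel.of([ [ id:'test' ], file('assembly.fasta') ])
    GAMMA ( fasta_ch, file('gene_db.fasta') ) // db: gene database in FASTA format
    GAMMA.out.gamma.view()
}
```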

modules/gamma/meta.yml
View file

@ -0,0 +1,63 @@
name: "gamma"
description: Gene Allele Mutation Microbial Assessment
keywords:
- gamma
- gene-calling
tools:
- "gamma":
description: "Tool for Gene Allele Mutation Microbial Assessment"
homepage: "https://github.com/rastanton/GAMMA"
documentation: "https://github.com/rastanton/GAMMA"
tool_dev_url: "https://github.com/rastanton/GAMMA"
doi: "10.1093/bioinformatics/btab607"
licence: "['Apache License 2.0']"
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- fasta:
type: file
description: FASTA file
pattern: "*.{fa,fasta}"
- db:
type: file
description: Database in FASTA format
pattern: "*.{fa,fasta}"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- gamma:
type: file
description: GAMMA file with annotated gene matches
pattern: "*.{gamma}"
- psl:
type: file
description: PSL file with all gene matches found
pattern: "*.{psl}"
- gff:
type: file
description: GFF file
pattern: "*.{gff}"
- fasta:
type: file
description: multifasta file of the gene matches
pattern: "*.{fasta}"
authors:
- "@sateeshperi"
- "@rastanton"

View file

@ -17,7 +17,7 @@ process GATK4_HAPLOTYPECALLER {
     output:
     tuple val(meta), path("*.vcf.gz"), emit: vcf
-    tuple val(meta), path("*.tbi")   , emit: tbi
+    tuple val(meta), path("*.tbi")   , optional:true, emit: tbi
     path "versions.yml"              , emit: versions
     when:

View file

@ -8,7 +8,7 @@ process GATK4_SPLITNCIGARREADS {
         'quay.io/biocontainers/gatk4:4.2.5.0--hdfd78af_0' }"
     input:
-    tuple val(meta), path(bam)
+    tuple val(meta), path(bam), path(bai), path(intervals)
     path fasta
     path fai
     path dict
@ -23,6 +23,7 @@ process GATK4_SPLITNCIGARREADS {
     script:
     def args = task.ext.args ?: ''
     def prefix = task.ext.prefix ?: "${meta.id}"
+    def interval_command = intervals ? "--intervals $intervals" : ""
     def avail_mem = 3
     if (!task.memory) {
@ -35,6 +36,7 @@ process GATK4_SPLITNCIGARREADS {
         --input $bam \\
         --output ${prefix}.bam \\
         --reference $fasta \\
+        $interval_command \\
         --tmp-dir . \\
         $args

View file

@ -23,6 +23,13 @@ input:
       type: list
       description: BAM/SAM/CRAM file containing reads
       pattern: "*.{bam,sam,cram}"
+  - bai:
+      type: list
+      description: BAI/SAI/CRAI index file (optional)
+      pattern: "*.{bai,sai,crai}"
+  - intervals:
+      type: file
+      description: Bed file with the genomic regions included in the library (optional)
   - fasta:
       type: file
       description: The reference fasta file

View file

@ -4,8 +4,8 @@ process GUNZIP {
     conda (params.enable_conda ? "conda-forge::sed=4.7" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://containers.biocontainers.pro/s3/SingImgsRepo/biocontainers/v1.2.0_cv1/biocontainers_v1.2.0_cv1.img' :
-        'biocontainers/biocontainers:v1.2.0_cv1' }"
+        'https://depot.galaxyproject.org/singularity/ubuntu:20.04' :
+        'ubuntu:20.04' }"
     input:
     tuple val(meta), path(archive)

View file

@ -0,0 +1,40 @@
process KAIJU_KAIJU2TABLE {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::kaiju=1.8.2" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/kaiju:1.8.2--h5b5514e_1':
'quay.io/biocontainers/kaiju:1.8.2--h5b5514e_1' }"
input:
tuple val(meta), path(results)
path db
val taxon_rank
output:
tuple val(meta), path('*.txt'), emit: summary
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
dbnodes=`find -L ${db} -name "*nodes.dmp"`
dbname=`find -L ${db} -name "*.fmi" -not -name "._*"`
kaiju2table $args \\
-t \$dbnodes \\
-n \$dbname \\
-r ${taxon_rank} \\
-o ${prefix}.txt \\
${results}
cat <<-END_VERSIONS > versions.yml
"${task.process}":
kaiju: \$(echo \$( kaiju -h 2>&1 | sed -n 1p | sed 's/^.*Kaiju //' ))
END_VERSIONS
"""
}
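A hedged call sketch; since the script locates `nodes.dmp` and the `.fmi` index inside the database directory itself, only the directory and a rank string are passed (paths hypothetical):

```nextflow
include { KAIJU_KAIJU2TABLE } from './modules/kaiju/kaiju2table/main'

workflow {
    results_ch = Channel.of([ [ id:'test' ], file('test.kaiju.out') ])
    KAIJU_KAIJU2TABLE ( results_ch, file('kaiju_db'), 'species' )
    KAIJU_KAIJU2TABLE.out.summary.view()
}
```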

View file

@ -0,0 +1,50 @@
name: "kaiju_kaiju2table"
description: Summarise kaiju classification results into a table for a given taxonomic rank
keywords:
- classify
- metagenomics
tools:
- kaiju:
description: Fast and sensitive taxonomic classification for metagenomics
homepage: https://kaiju.binf.ku.dk/
documentation: https://github.com/bioinformatics-centre/kaiju/blob/master/README.md
tool_dev_url: https://github.com/bioinformatics-centre/kaiju
doi: "10.1038/ncomms11257"
licence: ["GNU GPL v3"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- results:
type: file
description: File containing the kaiju classification results
pattern: "*.{txt}"
- taxon_rank:
type: string
description: |
Taxonomic rank to display in report
pattern: "phylum|class|order|family|genus|species"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- summary:
type: file
description: |
Summary table for a given taxonomic rank
pattern: "*.{txt}"
authors:
- "@sofstam"
- "@talnor"
- "@jfy133"

View file

@ -10,11 +10,14 @@ process KRAKEN2_KRAKEN2 {
     input:
     tuple val(meta), path(reads)
     path db
+    val save_output_fastqs
+    val save_reads_assignment
     output:
-    tuple val(meta), path('*classified*')  , emit: classified
-    tuple val(meta), path('*unclassified*'), emit: unclassified
-    tuple val(meta), path('*report.txt')   , emit: txt
+    tuple val(meta), path('*classified*')     , optional:true, emit: classified_reads_fastq
+    tuple val(meta), path('*unclassified*')   , optional:true, emit: unclassified_reads_fastq
+    tuple val(meta), path('*classifiedreads*'), optional:true, emit: classified_reads_assignment
+    tuple val(meta), path('*report.txt')      , emit: report
     path "versions.yml" , emit: versions
     when:
@ -26,19 +29,25 @@ process KRAKEN2_KRAKEN2 {
     def paired       = meta.single_end ? "" : "--paired"
     def classified   = meta.single_end ? "${prefix}.classified.fastq"   : "${prefix}.classified#.fastq"
     def unclassified = meta.single_end ? "${prefix}.unclassified.fastq" : "${prefix}.unclassified#.fastq"
+    def classified_command = save_output_fastqs ? "--classified-out ${classified}" : ""
+    def unclassified_command = save_output_fastqs ? "--unclassified-out ${unclassified}" : ""
+    def readclassification_command = save_reads_assignment ? "--output ${prefix}.kraken2.classifiedreads.txt" : ""
+    def compress_reads_command = save_output_fastqs ? "pigz -p $task.cpus *.fastq" : ""
     """
     kraken2 \\
         --db $db \\
         --threads $task.cpus \\
-        --unclassified-out $unclassified \\
-        --classified-out $classified \\
         --report ${prefix}.kraken2.report.txt \\
         --gzip-compressed \\
+        $unclassified_command \\
+        $classified_command \\
+        $readclassification_command \\
         $paired \\
         $args \\
         $reads
-    pigz -p $task.cpus *.fastq
+    $compress_reads_command
     cat <<-END_VERSIONS > versions.yml
     "${task.process}":

View file

@ -27,25 +27,40 @@ input:
   - db:
       type: directory
       description: Kraken2 database
+  - save_output_fastqs:
+      type: boolean
+      description: |
+        If true, optional commands are added to save classified and unclassified reads
+        as fastq files
+  - save_reads_assignment:
+      type: boolean
+      description: |
+        If true, an optional command is added to save a file reporting the taxonomic
+        classification of each input read
 output:
   - meta:
       type: map
       description: |
         Groovy Map containing sample information
         e.g. [ id:'test', single_end:false ]
-  - classified:
+  - classified_reads_fastq:
       type: file
       description: |
-        Reads classified to belong to any of the taxa
+        Reads classified as belonging to any of the taxa
         on the Kraken2 database.
       pattern: "*{fastq.gz}"
-  - unclassified:
+  - unclassified_reads_fastq:
       type: file
       description: |
-        Reads not classified to belong to any of the taxa
+        Reads not classified to any of the taxa
         on the Kraken2 database.
       pattern: "*{fastq.gz}"
-  - txt:
+  - classified_reads_assignment:
+      type: file
+      description: |
+        Kraken2 output file indicating the taxonomic assignment of
+        each input read
+  - report:
       type: file
       description: |
         Kraken2 report containing stats about classified

View file

@ -2,17 +2,21 @@ process MINIMAP2_ALIGN {
     tag "$meta.id"
     label 'process_medium'
-    conda (params.enable_conda ? 'bioconda::minimap2=2.21' : null)
+    conda (params.enable_conda ? 'bioconda::minimap2=2.21 bioconda::samtools=1.12' : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/minimap2:2.21--h5bf99c6_0' :
-        'quay.io/biocontainers/minimap2:2.21--h5bf99c6_0' }"
+        'https://depot.galaxyproject.org/singularity/mulled-v2-66534bcbb7031a148b13e2ad42583020b9cd25c4:1679e915ddb9d6b4abda91880c4b48857d471bd8-0' :
+        'quay.io/biocontainers/mulled-v2-66534bcbb7031a148b13e2ad42583020b9cd25c4:1679e915ddb9d6b4abda91880c4b48857d471bd8-0' }"
     input:
     tuple val(meta), path(reads)
     path reference
+    val bam_format
+    val cigar_paf_format
+    val cigar_bam
     output:
-    tuple val(meta), path("*.paf"), emit: paf
+    tuple val(meta), path("*.paf"), optional: true, emit: paf
+    tuple val(meta), path("*.bam"), optional: true, emit: bam
     path "versions.yml" , emit: versions
     when:
@ -22,13 +26,19 @@ process MINIMAP2_ALIGN {
     def args = task.ext.args ?: ''
     def prefix = task.ext.prefix ?: "${meta.id}"
     def input_reads = meta.single_end ? "$reads" : "${reads[0]} ${reads[1]}"
+    def bam_output = bam_format ? "-a | samtools sort | samtools view -@ ${task.cpus} -b -h -o ${prefix}.bam" : "-o ${prefix}.paf"
+    def cigar_paf = cigar_paf_format && !bam_format ? "-c" : ''
+    def set_cigar_bam = cigar_bam && bam_format ? "-L" : ''
     """
     minimap2 \\
         $args \\
         -t $task.cpus \\
         $reference \\
         $input_reads \\
-        > ${prefix}.paf
+        $cigar_paf \\
+        $set_cigar_bam \\
+        $bam_output
     cat <<-END_VERSIONS > versions.yml
     "${task.process}":

View file

@ -29,6 +29,17 @@ input:
       type: file
       description: |
         Reference database in FASTA format.
+  - bam_format:
+      type: boolean
+      description: Specify that output should be in BAM format
+  - cigar_paf_format:
+      type: boolean
+      description: Specify that output CIGAR should be in PAF format
+  - cigar_bam:
+      type: boolean
+      description: |
+        Write CIGAR with >65535 ops at the CG tag. This is recommended when
+        working with CIGARs longer than 65535 operations (https://github.com/lh3/minimap2#working-with-65535-cigar-operations)
 output:
   - meta:
       type: map
@ -39,9 +50,16 @@ output:
       type: file
       description: Alignment in PAF format
       pattern: "*.paf"
+  - bam:
+      type: file
+      description: Alignment in BAM format
+      pattern: "*.bam"
   - versions:
       type: file
       description: File containing software versions
       pattern: "versions.yml"
 authors:
   - "@heuermh"
+  - "@sofstam"
+  - "@sateeshperi"
+  - "@jfy133"

View file

@ -23,10 +23,11 @@ process PHANTOMPEAKQUALTOOLS {
     script:
     def args = task.ext.args ?: ''
+    def args2 = task.ext.args2 ?: ''
     def prefix = task.ext.prefix ?: "${meta.id}"
     """
     RUN_SPP=`which run_spp.R`
-    Rscript $args -e "library(caTools); source(\\"\$RUN_SPP\\")" -c="$bam" -savp="${prefix}.spp.pdf" -savd="${prefix}.spp.Rdata" -out="${prefix}.spp.out" -p=$task.cpus
+    Rscript $args -e "library(caTools); source(\\"\$RUN_SPP\\")" -c="$bam" -savp="${prefix}.spp.pdf" -savd="${prefix}.spp.Rdata" -out="${prefix}.spp.out" $args2
     cat <<-END_VERSIONS > versions.yml
     "${task.process}":

View file

@ -15,7 +15,7 @@ process PICARD_COLLECTHSMETRICS {
     path target_intervals
     output:
-    tuple val(meta), path("*collecthsmetrics.txt"), emit: hs_metrics
+    tuple val(meta), path("*_metrics")            , emit: metrics
     path "versions.yml" , emit: versions
     when:
@ -41,7 +41,8 @@ process PICARD_COLLECTHSMETRICS {
         -BAIT_INTERVALS $bait_intervals \\
         -TARGET_INTERVALS $target_intervals \\
         -INPUT $bam \\
-        -OUTPUT ${prefix}_collecthsmetrics.txt
+        -OUTPUT ${prefix}.CollectHsMetrics.coverage_metrics
     cat <<-END_VERSIONS > versions.yml
     "${task.process}":
@ -52,7 +53,7 @@ process PICARD_COLLECTHSMETRICS {
     stub:
     def prefix = task.ext.prefix ?: "${meta.id}"
     """
-    touch ${prefix}_collecthsmetrics.txt
+    touch ${prefix}.CollectHsMetrics.coverage_metrics
     cat <<-END_VERSIONS > versions.yml
     "${task.process}":

View file

@ -57,10 +57,11 @@ output:
       type: file
       description: File containing software versions
       pattern: "versions.yml"
-  - hs_metrics:
+  - metrics:
       type: file
-      description: The metrics file.
-      pattern: "*_collecthsmetrics.txt"
+      description: Alignment metrics files generated by picard
+      pattern: "*_{metrics}"
 authors:
   - "@projectoriented"
+  - "@matthdsm"

View file

@ -2,10 +2,10 @@ process RSEM_CALCULATEEXPRESSION {
     tag "$meta.id"
     label 'process_high'
-    conda (params.enable_conda ? "bioconda::rsem=1.3.3 bioconda::star=2.7.6a" : null)
+    conda (params.enable_conda ? "bioconda::rsem=1.3.3 bioconda::star=2.7.10a" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/mulled-v2-cf0123ef83b3c38c13e3b0696a3f285d3f20f15b:606b713ec440e799d53a2b51a6e79dbfd28ecf3e-0' :
-        'quay.io/biocontainers/mulled-v2-cf0123ef83b3c38c13e3b0696a3f285d3f20f15b:606b713ec440e799d53a2b51a6e79dbfd28ecf3e-0' }"
+        'https://depot.galaxyproject.org/singularity/mulled-v2-cf0123ef83b3c38c13e3b0696a3f285d3f20f15b:64aad4a4e144878400649e71f42105311be7ed87-0' :
+        'quay.io/biocontainers/mulled-v2-cf0123ef83b3c38c13e3b0696a3f285d3f20f15b:64aad4a4e144878400649e71f42105311be7ed87-0' }"
     input:
     tuple val(meta), path(reads)

View file

@ -2,10 +2,10 @@ process RSEM_PREPAREREFERENCE {
     tag "$fasta"
     label 'process_high'
-    conda (params.enable_conda ? "bioconda::rsem=1.3.3 bioconda::star=2.7.6a" : null)
+    conda (params.enable_conda ? "bioconda::rsem=1.3.3 bioconda::star=2.7.10a" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/mulled-v2-cf0123ef83b3c38c13e3b0696a3f285d3f20f15b:606b713ec440e799d53a2b51a6e79dbfd28ecf3e-0' :
-        'quay.io/biocontainers/mulled-v2-cf0123ef83b3c38c13e3b0696a3f285d3f20f15b:606b713ec440e799d53a2b51a6e79dbfd28ecf3e-0' }"
+        'https://depot.galaxyproject.org/singularity/mulled-v2-cf0123ef83b3c38c13e3b0696a3f285d3f20f15b:64aad4a4e144878400649e71f42105311be7ed87-0' :
+        'quay.io/biocontainers/mulled-v2-cf0123ef83b3c38c13e3b0696a3f285d3f20f15b:64aad4a4e144878400649e71f42105311be7ed87-0' }"
     input:
     path fasta, stageAs: "rsem/*"

View file

@ -0,0 +1,35 @@
// There is a -L option to only output alignments within an interval; might be an option for exon/panel data?
process SAMTOOLS_BAMTOCRAM {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::samtools=1.15.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/samtools:1.15.1--h1170115_0' :
'quay.io/biocontainers/samtools:1.15.1--h1170115_0' }"
input:
tuple val(meta), path(input), path(index)
path fasta
path fai
output:
tuple val(meta), path("*.cram"), path("*.crai"), emit: cram_crai
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
samtools view --threads ${task.cpus} --reference ${fasta} -C $args $input > ${prefix}.cram
samtools index -@${task.cpus} ${prefix}.cram
cat <<-END_VERSIONS > versions.yml
"${task.process}":
samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
END_VERSIONS
"""
}

View file

@ -0,0 +1,52 @@
name: samtools_bamtocram
description: filter/convert and then index CRAM file
keywords:
- view
- index
- bam
- cram
tools:
- samtools:
description: |
SAMtools is a set of utilities for interacting with and post-processing
short DNA sequence read alignments in the SAM, BAM and CRAM formats, written by Heng Li.
These files are generated as output by short read aligners like BWA.
homepage: http://www.htslib.org/
      documentation: http://www.htslib.org/doc/samtools.html
doi: 10.1093/bioinformatics/btp352
licence: ["MIT"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- input:
type: file
description: BAM/SAM file
pattern: "*.{bam,sam}"
- index:
type: file
description: BAM/SAM index file
pattern: "*.{bai,sai}"
- fasta:
type: file
description: Reference file to create the CRAM file
pattern: "*.{fasta,fa}"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- cram_crai:
type: file
description: filtered/converted CRAM file + index
pattern: "*{.cram,.crai}"
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"
authors:
- "@FriederikeHanssen"
- "@maxulysse"

View file

@ -0,0 +1,47 @@
process SAMTOOLS_COLLATEFASTQ {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::samtools=1.15.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/samtools:1.15.1--h1170115_0' :
'quay.io/biocontainers/samtools:1.15.1--h1170115_0' }"
input:
tuple val(meta), path(input)
output:
//TODO might be good to have ordered output of the fastq files, so we can
    // make sure that we get the right files
tuple val(meta), path("*_{1,2}.fq.gz"), path("*_other.fq.gz"), path("*_singleton.fq.gz"), emit: reads
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
    def args2 = task.ext.args2 ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
samtools collate \\
$args \\
--threads $task.cpus \\
-O \\
$input \\
. |
samtools fastq \\
$args2 \\
--threads $task.cpus \\
-1 ${prefix}_1.fq.gz \\
-2 ${prefix}_2.fq.gz \\
-0 ${prefix}_other.fq.gz \\
-s ${prefix}_singleton.fq.gz
cat <<-END_VERSIONS > versions.yml
"${task.process}":
samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
END_VERSIONS
"""
}

View file

@ -0,0 +1,48 @@
name: samtools_collatefastq
description: |
The module uses collate and then fastq methods from samtools to
convert a SAM, BAM or CRAM file to FASTQ format
keywords:
- bam2fq
- samtools
- fastq
tools:
- samtools:
description: Tools for dealing with SAM, BAM and CRAM files
      homepage: http://www.htslib.org
      documentation: http://www.htslib.org/doc/1.1/samtools.html
      tool_dev_url: https://github.com/samtools/samtools
      doi: 10.1093/bioinformatics/btp352
licence: ["MIT"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- input:
type: file
description: BAM/CRAM/SAM file
pattern: "*.{bam,cram,sam}"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- reads:
type: file
description: |
FASTQ files, which will be either a group of 4 files (read_1, read_2, other and singleton)
or a single interleaved .fq.gz file if the user chooses not to split the reads.
pattern: "*.fq.gz"
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
authors:
- "@lescai"
- "@maxulysse"

View file

@ -8,7 +8,7 @@ process SAMTOOLS_VIEW {
        'quay.io/biocontainers/samtools:1.15.1--h1170115_0' }"

    input:
-    tuple val(meta), path(input)
+    tuple val(meta), path(input), path(index)
    path fasta

    output:
View file

@ -25,6 +25,10 @@ input:
      type: file
      description: BAM/CRAM/SAM file
      pattern: "*.{bam,cram,sam}"
+  - index:
+      type: optional file
+      description: BAM.BAI/CRAM.CRAI file
+      pattern: "*.{bai,crai}"
  - fasta:
      type: optional file
      description: Reference file the CRAM was created with

View file

@ -9,6 +9,7 @@ process STRANGER {

    input:
    tuple val(meta), path(vcf)
+    path variant_catalog

    output:
    tuple val(meta), path("*.gz"), emit: vcf

@ -20,10 +21,23 @@ process STRANGER {
    script:
    def args = task.ext.args ?: ''
    def prefix = task.ext.prefix ?: "${meta.id}"
+    def options_variant_catalog = variant_catalog ? "--repeats-file $variant_catalog" : ""
    """
    stranger \\
        $args \\
-        $vcf | gzip --no-name > ${prefix}.vcf.gz
+        $vcf \\
+        $options_variant_catalog | gzip --no-name > ${prefix}.vcf.gz

+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        stranger: \$( stranger --version )
+    END_VERSIONS
+    """
+
+    stub:
+    def prefix = task.ext.prefix ?: "${meta.id}"
+    """
+    touch ${prefix}.vcf.gz
+
    cat <<-END_VERSIONS > versions.yml
    "${task.process}":

View file

@ -24,6 +24,10 @@ input:
      type: file
      description: VCF with repeat expansions
      pattern: "*.{vcf.gz,vcf}"
+  - variant_catalog:
+      type: file
+      description: json file with repeat expansion sites to genotype
+      pattern: "*.{json}"

output:
  - meta:
View file

@ -2,10 +2,10 @@ process STRINGTIE_MERGE {
    label 'process_medium'

    // Note: 2.7X indices incompatible with AWS iGenomes.
-    conda (params.enable_conda ? "bioconda::stringtie=2.1.7" : null)
+    conda (params.enable_conda ? "bioconda::stringtie=2.2.1" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/stringtie:2.1.7--h978d192_0' :
-        'quay.io/biocontainers/stringtie:2.1.7--h978d192_0' }"
+        'https://depot.galaxyproject.org/singularity/stringtie:2.2.1--hecb563c_2' :
+        'quay.io/biocontainers/stringtie:2.2.1--hecb563c_2' }"

    input:
    path stringtie_gtf

View file

@ -1,11 +1,11 @@
-process STRINGTIE {
+process STRINGTIE_STRINGTIE {
    tag "$meta.id"
    label 'process_medium'

-    conda (params.enable_conda ? "bioconda::stringtie=2.1.7" : null)
+    conda (params.enable_conda ? "bioconda::stringtie=2.2.1" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/stringtie:2.1.7--h978d192_0' :
-        'quay.io/biocontainers/stringtie:2.1.7--h978d192_0' }"
+        'https://depot.galaxyproject.org/singularity/stringtie:2.2.1--hecb563c_2' :
+        'quay.io/biocontainers/stringtie:2.2.1--hecb563c_2' }"

    input:
    tuple val(meta), path(bam)

View file

@ -1,4 +1,4 @@
-name: stringtie
+name: stringtie_stringtie
description: Transcript assembly and quantification for RNA-Seq
keywords:
  - transcript

View file

@ -11,7 +11,7 @@ process TABIX_BGZIP {
    tuple val(meta), path(input)

    output:
-    tuple val(meta), path("*.gz"), emit: gz
+    tuple val(meta), path("${prefix}*"), emit: output
    path "versions.yml", emit: versions

    when:

@ -19,9 +19,12 @@ process TABIX_BGZIP {
    script:
    def args = task.ext.args ?: ''
-    def prefix = task.ext.prefix ?: "${meta.id}"
+    prefix   = task.ext.prefix ?: "${meta.id}"
+    in_bgzip = input.toString().endsWith(".gz")
+    command1 = in_bgzip ? '-d' : '-c'
+    command2 = in_bgzip ? '' : " > ${prefix}.${input.getExtension()}.gz"
    """
-    bgzip -c $args $input > ${prefix}.${input.getExtension()}.gz
+    bgzip $command1 $args -@${task.cpus} $input $command2

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":

View file

@ -1,13 +1,14 @@
name: tabix_bgzip
-description: Compresses files
+description: Compresses/decompresses files
keywords:
  - compress
+  - decompress
  - bgzip
  - tabix
tools:
  - bgzip:
      description: |
-        Bgzip compresses files in a similar manner to, and compatible with, gzip.
+        Bgzip compresses or decompresses files in a similar manner to, and compatible with, gzip.
      homepage: https://www.htslib.org/doc/tabix.html
      documentation: http://www.htslib.org/doc/bgzip.html
      doi: 10.1093/bioinformatics/btp352

@ -18,19 +19,19 @@ input:
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
-  - file:
+  - input:
      type: file
-      description: text file
+      description: file to compress or to decompress
output:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
-  - file:
+  - output:
      type: file
-      description: Output compressed file
+      description: Output compressed/decompressed file
-      pattern: "*.{gz}"
+      pattern: "*."
  - versions:
      type: file
      description: File containing software versions

View file

@ -24,7 +24,7 @@ process TIDDIT_SV {
    script:
    def args = task.ext.args ?: ''
    def prefix = task.ext.prefix ?: "${meta.id}"
-    def reference = fasta == "dummy_file.txt" ? "--ref $fasta" : ""
+    def reference = fasta ? "--ref $fasta" : ""
    """
    tiddit \\
        --sv \\

View file

@ -11,12 +11,13 @@ process TRIMGALORE {
    tuple val(meta), path(reads)

    output:
-    tuple val(meta), path("*.fq.gz")              , emit: reads
+    tuple val(meta), path("*{trimmed,val}*.fq.gz"), emit: reads
    tuple val(meta), path("*report.txt")          , emit: log
    path "versions.yml"                           , emit: versions

-    tuple val(meta), path("*.html"), emit: html optional true
-    tuple val(meta), path("*.zip") , emit: zip  optional true
+    tuple val(meta), path("*unpaired*.fq.gz")     , emit: unpaired, optional: true
+    tuple val(meta), path("*.html")               , emit: html    , optional: true
+    tuple val(meta), path("*.zip")                , emit: zip     , optional: true

    when:
    task.ext.when == null || task.ext.when

@ -52,6 +53,7 @@ process TRIMGALORE {
        $c_r1 \\
        $tpc_r1 \\
        ${prefix}.fastq.gz
+
    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        trimgalore: \$(echo \$(trim_galore --version 2>&1) | sed 's/^.*version //; s/Last.*\$//')

@ -73,6 +75,7 @@ process TRIMGALORE {
        $tpc_r2 \\
        ${prefix}_1.fastq.gz \\
        ${prefix}_2.fastq.gz
+
    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        trimgalore: \$(echo \$(trim_galore --version 2>&1) | sed 's/^.*version //; s/Last.*\$//')

View file

@ -37,6 +37,11 @@ output:
        List of input adapter trimmed FastQ files of size 1 and 2 for
        single-end and paired-end data, respectively.
      pattern: "*.{fq.gz}"
+  - unpaired:
+      type: file
+      description: |
+        FastQ files containing unpaired reads from read 1 or read 2
+      pattern: "*unpaired*.fq.gz"
  - html:
      type: file
      description: FastQC report (optional)

View file

@ -2,10 +2,10 @@ process UNTAR {
    tag "$archive"
    label 'process_low'

-    conda (params.enable_conda ? "conda-forge::tar=1.34" : null)
+    conda (params.enable_conda ? "conda-forge::sed=4.7" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://containers.biocontainers.pro/s3/SingImgsRepo/biocontainers/v1.2.0_cv2/biocontainers_v1.2.0_cv2.img' :
-        'biocontainers/biocontainers:v1.2.0_cv2' }"
+        'https://depot.galaxyproject.org/singularity/ubuntu:20.04' :
+        'ubuntu:20.04' }"

    input:
    tuple val(meta), path(archive)

View file

@ -26,6 +26,10 @@ allelecounter:
  - modules/allelecounter/**
  - tests/modules/allelecounter/**

+amplify/predict:
+  - modules/amplify/predict/**
+  - tests/modules/amplify/predict/**
+
amps:
  - modules/amps/**
  - tests/modules/amps/**

@ -38,6 +42,10 @@ amrfinderplus/update:
  - modules/amrfinderplus/update/**
  - tests/modules/amrfinderplus/update/**

+antismash/antismashlitedownloaddatabases:
+  - modules/antismash/antismashlitedownloaddatabases/**
+  - tests/modules/antismash/antismashlitedownloaddatabases/**
+
arriba:
  - modules/arriba/**
  - tests/modules/arriba/**

@ -166,6 +174,10 @@ bcftools/view:
  - modules/bcftools/view/**
  - tests/modules/bcftools/view/**

+bclconvert:
+  - modules/bclconvert/**
+  - tests/modules/bclconvert/**
+
bedtools/bamtobed:
  - modules/bedtools/bamtobed/**
  - tests/modules/bedtools/bamtobed/**

@ -587,6 +599,14 @@ ectyper:
  - modules/ectyper/**
  - tests/modules/ectyper/**

+elprep/filter:
+  - modules/elprep/filter/**
+  - tests/modules/elprep/filter/**
+
+elprep/split:
+  - modules/elprep/split/**
+  - tests/modules/elprep/split/**
+
emmtyper:
  - modules/emmtyper/**
  - tests/modules/emmtyper/**

@ -655,10 +675,18 @@ freebayes:
  - modules/freebayes/**
  - tests/modules/freebayes/**

+gamma:
+  - modules/gamma/**
+  - tests/modules/gamma/**
+
gatk4/applybqsr:
  - modules/gatk4/applybqsr/**
  - tests/modules/gatk4/applybqsr/**

+gatk4/applybqsrspark:
+  - modules/gatk4/applybqsrspark/**
+  - tests/modules/gatk4/applybqsrspark/**
+
gatk4/applyvqsr:
  - modules/gatk4/applyvqsr/**
  - tests/modules/gatk4/applyvqsr/**

@ -667,6 +695,10 @@ gatk4/baserecalibrator:
  - modules/gatk4/baserecalibrator/**
  - tests/modules/gatk4/baserecalibrator/**

+gatk4/baserecalibratorspark:
+  - modules/gatk4/baserecalibratorspark/**
+  - tests/modules/gatk4/baserecalibratorspark/**
+
gatk4/bedtointervallist:
  - modules/gatk4/bedtointervallist/**
  - tests/modules/gatk4/bedtointervallist/**

@ -743,6 +775,10 @@ gatk4/markduplicates:
  - modules/gatk4/markduplicates/**
  - tests/modules/gatk4/markduplicates/**

+gatk4/markduplicatesspark:
+  - modules/gatk4/markduplicatesspark/**
+  - tests/modules/gatk4/markduplicatesspark/**
+
gatk4/mergebamalignment:
  - modules/gatk4/mergebamalignment/**
  - tests/modules/gatk4/mergebamalignment/**

@ -977,6 +1013,10 @@ kaiju/kaiju:
  - modules/kaiju/kaiju/**
  - tests/modules/kaiju/kaiju/**

+kaiju/kaiju2table:
+  - modules/kaiju/kaiju2table/**
+  - tests/modules/kaiju/kaiju2table/**
+
kallisto/index:
  - modules/kallisto/index/**
  - tests/modules/kallisto/index/**

@ -1559,6 +1599,14 @@ samtools/bam2fq:
  - modules/samtools/bam2fq/**
  - tests/modules/samtools/bam2fq/**

+samtools/bamtocram:
+  - modules/samtools/bamtocram/**
+  - tests/modules/samtools/bamtocram/**
+
+samtools/collatefastq:
+  - modules/samtools/collatefastq/**
+  - tests/modules/samtools/collatefastq/**
+
samtools/depth:
  - modules/samtools/depth/**
  - tests/modules/samtools/depth/**

View file

@ -112,6 +112,7 @@ params {
        }
        'homo_sapiens' {
            'genome' {
+                genome_elfasta      = "${test_data_dir}/genomics/homo_sapiens/genome/genome.elfasta"
                genome_fasta        = "${test_data_dir}/genomics/homo_sapiens/genome/genome.fasta"
                genome_fasta_fai    = "${test_data_dir}/genomics/homo_sapiens/genome/genome.fasta.fai"
                genome_dict         = "${test_data_dir}/genomics/homo_sapiens/genome/genome.dict"

@ -123,6 +124,7 @@ params {
                genome_header       = "${test_data_dir}/genomics/homo_sapiens/genome/genome.header"
                genome_bed_gz       = "${test_data_dir}/genomics/homo_sapiens/genome/genome.bed.gz"
                genome_bed_gz_tbi   = "${test_data_dir}/genomics/homo_sapiens/genome/genome.bed.gz.tbi"
+                genome_elsites      = "${test_data_dir}/genomics/homo_sapiens/genome/genome.elsites"
                transcriptome_fasta = "${test_data_dir}/genomics/homo_sapiens/genome/transcriptome.fasta"
                genome2_fasta       = "${test_data_dir}/genomics/homo_sapiens/genome/genome2.fasta"
                genome_chain_gz     = "${test_data_dir}/genomics/homo_sapiens/genome/genome.chain.gz"

@ -136,6 +138,7 @@ params {
                genome_21_multi_interval_bed_gz_tbi = "${test_data_dir}/genomics/homo_sapiens/genome/chr21/sequence/multi_intervals.bed.gz.tbi"
                genome_21_chromosomes_dir           = "${test_data_dir}/genomics/homo_sapiens/genome/chr21/sequence/chromosomes.tar.gz"

+                dbsnp_146_hg38_elsites    = "${test_data_dir}/genomics/homo_sapiens/genome/vcf/dbsnp_146.hg38.elsites"
                dbsnp_146_hg38_vcf_gz     = "${test_data_dir}/genomics/homo_sapiens/genome/vcf/dbsnp_146.hg38.vcf.gz"
                dbsnp_146_hg38_vcf_gz_tbi = "${test_data_dir}/genomics/homo_sapiens/genome/vcf/dbsnp_146.hg38.vcf.gz.tbi"
                gnomad_r2_1_1_vcf_gz      = "${test_data_dir}/genomics/homo_sapiens/genome/vcf/gnomAD.r2.1.1.vcf.gz"

@ -242,8 +245,8 @@ params {
                test2_2_fastq_gz     = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test2_2.fastq.gz"
                test2_umi_1_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test2.umi_1.fastq.gz"
                test2_umi_2_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test2.umi_2.fastq.gz"
-                test_rnaseq_1_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test.rnaseq_1.fastq.gz"
-                test_rnaseq_2_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test.rnaseq_2.fastq.gz"
+                test_rnaseq_1_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test_rnaseq_1.fastq.gz"
+                test_rnaseq_2_fastq_gz = "${test_data_dir}/genomics/homo_sapiens/illumina/fastq/test_rnaseq_2.fastq.gz"
                test_baserecalibrator_table  = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/test.baserecalibrator.table"
                test2_baserecalibrator_table = "${test_data_dir}/genomics/homo_sapiens/illumina/gatk/test2.baserecalibrator.table"

@ -332,6 +335,7 @@ params {
        'bacteroides_fragilis' {
            'genome' {
                genome_fna_gz                = "${test_data_dir}/genomics/prokaryotes/bacteroides_fragilis/genome/genome.fna.gz"
+                genome_gbff_gz               = "${test_data_dir}/genomics/prokaryotes/bacteroides_fragilis/genome/genome.gbff.gz"
                genome_paf                   = "${test_data_dir}/genomics/prokaryotes/bacteroides_fragilis/genome/genome.paf"
                genome_mapping_potential_arg = "${test_data_dir}/genomics/prokaryotes/bacteroides_fragilis/genome/genome.mapping.potential.ARG"

View file

@ -0,0 +1,18 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { PRODIGAL        } from '../../../../modules/prodigal/main.nf'        addParams( options: [:] )
include { AMPLIFY_PREDICT } from '../../../../modules/amplify/predict/main.nf' addParams( options: [:] )
workflow amplify_predict {
input = [
[ id:'test', single_end:false ], // meta map
file(params.test_data['sarscov2']['illumina']['contigs_fasta'], checkIfExists: true)
]
model_dir = []
PRODIGAL ( input, "gff" )
AMPLIFY_PREDICT ( PRODIGAL.out.amino_acid_fasta, model_dir)
}

View file

@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}
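
This publishDir closure, repeated across the test configs in this commit, derives the publish folder from the fully qualified process name. A worked example in plain Groovy (the qualified name is hypothetical):

def name = 'TEST_AMPLIFY_PREDICT:AMPLIFY_PREDICT'              // workflow:process
assert name.tokenize(':')[-1] == 'AMPLIFY_PREDICT'             // last ':' segment
assert name.tokenize(':')[-1].tokenize('_')[0].toLowerCase() == 'amplify'
// so outputs land under ${params.outdir}/amplify, matching output/amplify/... in the test.yml below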

View file

@ -0,0 +1,9 @@
- name: amplify predict amplify_predict
command: nextflow run tests/modules/amplify/predict -entry amplify_predict -c tests/config/nextflow.config
tags:
- amplify/predict
- amplify
files:
- path: output/amplify/test.tsv
md5sum: 1951084ce1d410028be86754997e5852
- path: output/amplify/versions.yml

View file

@ -0,0 +1,29 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { UNTAR as UNTAR1 } from '../../../../modules/untar/main.nf'
include { UNTAR as UNTAR2 } from '../../../../modules/untar/main.nf'
include { UNTAR as UNTAR3 } from '../../../../modules/untar/main.nf'
include { ANTISMASH_ANTISMASHLITEDOWNLOADDATABASES } from '../../../../modules/antismash/antismashlitedownloaddatabases/main.nf'
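// The same UNTAR module is included three times under different aliases so it can
// be invoked once per archive below; each alias runs as an independent process
// with its own output channel.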
workflow test_antismash_antismashlitedownloaddatabases {
input1 = [
[],
file('https://github.com/nf-core/test-datasets/raw/modules/data/delete_me/antismash/css.tar.gz', checkIfExists: true)
]
input2 = [
[],
file('https://github.com/nf-core/test-datasets/raw/modules/data/delete_me/antismash/detection.tar.gz', checkIfExists: true)
]
input3 = [
[],
file('https://github.com/nf-core/test-datasets/raw/modules/data/delete_me/antismash/modules.tar.gz', checkIfExists: true)
]
UNTAR1 ( input1 )
UNTAR2 ( input2 )
UNTAR3 ( input3 )
ANTISMASH_ANTISMASHLITEDOWNLOADDATABASES ( UNTAR1.out.untar.map{ it[1] }, UNTAR2.out.untar.map{ it[1] }, UNTAR3.out.untar.map{ it[1] } )
}

View file

@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}

View file

@ -0,0 +1,17 @@
- name: antismash antismashlitedownloaddatabases test_antismash_antismashlitedownloaddatabases
command: nextflow run tests/modules/antismash/antismashlitedownloaddatabases -entry test_antismash_antismashlitedownloaddatabases -c tests/config/nextflow.config
tags:
- antismash
- antismash/antismashlitedownloaddatabases
files:
- path: output/antismash/versions.yml
md5sum: 24859c67023abab99de295d3675a24b6
- path: output/antismash/antismash_db
- path: output/antismash/antismash_db/clusterblast
- path: output/antismash/antismash_db/clustercompare
- path: output/antismash/antismash_db/pfam
- path: output/antismash/antismash_db/resfam
- path: output/antismash/antismash_db/tigrfam
- path: output/antismash/css
- path: output/antismash/detection
- path: output/antismash/modules

View file

@ -0,0 +1,22 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { BCLCONVERT } from '../../../modules/bclconvert/main.nf'
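// The matching test.yml invokes this with -stub-run, so only stub blocks execute:
// STUB_BCLCONVERT_INPUT fabricates a placeholder samplesheet and run directory
// rather than requiring real sequencer output for CI.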
process STUB_BCLCONVERT_INPUT {
output:
path "SampleSheet.csv" ,emit: samplesheet
path "DDMMYY_SERIAL_FLOWCELL" ,emit: run_dir
stub:
"""
mkdir DDMMYY_SERIAL_FLOWCELL
echo "SampleSheet" > SampleSheet.csv
"""
}
workflow test_bclconvert {
STUB_BCLCONVERT_INPUT ()
BCLCONVERT (STUB_BCLCONVERT_INPUT.out.samplesheet, STUB_BCLCONVERT_INPUT.out.run_dir)
}

View file

@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}

View file

@ -0,0 +1,52 @@
- name: bclconvert test_bclconvert
command: nextflow run tests/modules/bclconvert -entry test_bclconvert -c tests/config/nextflow.config -stub-run
tags:
- bclconvert
files:
- path: output/bclconvert/InterOp/InterOp.bin
md5sum: d3dea0bb4ab1c8754af324f40b001481
- path: output/bclconvert/Logs/Errors.log
md5sum: 334645f09074b2597a692e395b716a9c
- path: output/bclconvert/Logs/FastqComplete.txt
md5sum: a4c4c6ce2d0de67d3b7ac7d1fcb512e4
- path: output/bclconvert/Logs/Info.log
md5sum: d238822d379f2277cac950ca986cb660
- path: output/bclconvert/Logs/Warnings.log
md5sum: aeefd2d631817e170f88f25ecaaf4664
- path: output/bclconvert/Reports/Adapter_Metrics.csv
md5sum: af62e9c7b44940cfd8ea11064a1f42ae
- path: output/bclconvert/Reports/Demultiplex_Stats.csv
md5sum: d11313931fcaabb5ce159462ad3dd1da
- path: output/bclconvert/Reports/IndexMetricsOut.bin
md5sum: 6bcee11c8145e3b1059ceaa91d2f5be7
- path: output/bclconvert/Reports/Index_Hopping_Counts.csv
md5sum: 697e40e0c0d48b4bd25f138ef60b0bde
- path: output/bclconvert/Reports/Quality_Metrics.csv
md5sum: 3902fd38f6b01f1ce0f0e8724238f8f2
- path: output/bclconvert/Reports/RunInfo.xml
md5sum: 5bef7c7e76360231b0c4afdfc915fd44
- path: output/bclconvert/Reports/SampleSheet.csv
md5sum: c579e7d2c9c917c4cfb875a0373c0936
- path: output/bclconvert/Reports/Top_Unknown_Barcodes.csv
md5sum: 39a5e7f6d21c12d6051afdc8261b6330
- path: output/bclconvert/Reports/fastq_list.csv
md5sum: 32c51ab10e013fd547928de57361ffcb
- path: output/bclconvert/sample1_S1_L001_R1_001.fastq.gz
md5sum: 9b831a39755935333f86f167527a094d
- path: output/bclconvert/sample1_S1_L001_R2_001.fastq.gz
md5sum: 082f4f767b7619f409ca7e752ef482bf
- path: output/bclconvert/sample1_S1_L002_R1_001.fastq.gz
md5sum: 837764c89db93dfb53cd663c4f26f3d7
- path: output/bclconvert/sample1_S1_L002_R2_001.fastq.gz
md5sum: 1a42cf6ba0bb8fc7770f278e6d1ab676
- path: output/bclconvert/sample2_S2_L001_R1_001.fastq.gz
md5sum: 475bc426b7cc48d0551d40e31457dc78
- path: output/bclconvert/sample2_S2_L001_R2_001.fastq.gz
md5sum: f670ccd7d9352e0e67fe1c1232429d94
- path: output/bclconvert/sample2_S2_L002_R1_001.fastq.gz
md5sum: ebd5ff6fa5603e7d704b5a10598de58c
- path: output/bclconvert/sample2_S2_L002_R2_001.fastq.gz
md5sum: 2f83b460f52620d2548c7ef8845b31d7
- path: output/stub/SampleSheet.csv
md5sum: c579e7d2c9c917c4cfb875a0373c0936
- path: output/bclconvert/versions.yml

View file

@ -0,0 +1,18 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { ELPREP_FILTER } from '../../../../modules/elprep/filter/main.nf'
workflow test_elprep_filter {
input = [
[ id:'test', single_end:false ], // meta map
file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_bam'], checkIfExists: true)
]
reference_elfasta = file(params.test_data['homo_sapiens']['genome']['genome_elfasta'], checkIfExists: true)
known_sites_elsites = file(params.test_data['homo_sapiens']['genome']['dbsnp_146_hg38_elsites'], checkIfExists: true)
target_regions_bed = file(params.test_data['homo_sapiens']['genome']['genome_bed'], checkIfExists: true)
ELPREP_FILTER ( input, true, true, [], [], reference_elfasta, known_sites_elsites, target_regions_bed, [], [], true, true)
}

View file

@ -0,0 +1,7 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
withName: ELPREP_FILTER {
ext.args = "--mark-duplicates "
}
}

View file

@ -0,0 +1,13 @@
- name: elprep filter test_elprep_filter
command: nextflow run tests/modules/elprep/filter -entry test_elprep_filter -c tests/config/nextflow.config
tags:
- elprep
- elprep/filter
files:
- path: output/elprep/test.activity_profile.igv
- path: output/elprep/test.assembly_regions.igv
- path: output/elprep/output/test.bam
- path: output/elprep/test.g.vcf.gz
- path: output/elprep/test.metrics.txt
- path: output/elprep/test.recall
- path: output/elprep/versions.yml

View file

@ -0,0 +1,15 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { ELPREP_SPLIT } from '../../../../modules/elprep/split/main.nf'
workflow test_elprep_split {
input = [
[ id:'test', single_end:false ], // meta map
file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_bam'], checkIfExists: true)
]
ELPREP_SPLIT ( input )
}

View file

@ -0,0 +1,9 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
withName : ELPREP_SPLIT {
ext.args = "--contig-group-size 1 --output-type bam"
}
}

View file

@ -0,0 +1,10 @@
- name: elprep split test_elprep_split
command: nextflow run tests/modules/elprep/split -entry test_elprep_split -c tests/config/nextflow.config
tags:
- elprep
- elprep/split
files:
- path: output/elprep/output/splits/test-group00001.bam
- path: output/elprep/output/splits/test-unmapped.bam
- path: output/elprep/output/test-spread.bam
- path: output/elprep/versions.yml

View file

@ -0,0 +1,17 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { GAMMA } from '../../../modules/gamma/main.nf'
workflow test_gamma {
input = [
[ id:'test', single_end:false ], // meta map
file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
]
db = [ file(params.test_data['sarscov2']['genome']['transcriptome_fasta'], checkIfExists: true) ]
GAMMA ( input, db )
}

View file

@ -0,0 +1,7 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
ext.args = '--fasta'
}

View file

@ -0,0 +1,13 @@
- name: gamma test_gamma
command: nextflow run tests/modules/gamma -entry test_gamma -c tests/config/nextflow.config
tags:
- gamma
files:
- path: output/gamma/test.fasta
md5sum: df37b48466181311e0a679f3c5878484
- path: output/gamma/test.gamma
md5sum: 3256708fa517a65ed01d99e0e3c762ae
- path: output/gamma/test.psl
md5sum: 162a2757ed3b167ae1e0cdb24213f940
- path: output/gamma/versions.yml
md5sum: 3fefb5b46c94993362243c5f9a472057

View file

@ -6,7 +6,23 @@ include { GATK4_SPLITNCIGARREADS } from '../../../../modules/gatk4/splitncigarre

workflow test_gatk4_splitncigarreads {
    input = [ [ id:'test' ], // meta map
-              [ file(params.test_data['sarscov2']['illumina']['test_paired_end_bam'], checkIfExists: true) ]
+              file(params.test_data['sarscov2']['illumina']['test_paired_end_bam'], checkIfExists: true),
+              [],
+              []
+            ]
+
+    fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
+    fai = file(params.test_data['sarscov2']['genome']['genome_fasta_fai'], checkIfExists: true)
+    dict = file(params.test_data['sarscov2']['genome']['genome_dict'], checkIfExists: true)
+
+    GATK4_SPLITNCIGARREADS ( input, fasta, fai, dict )
+}
+
+workflow test_gatk4_splitncigarreads_intervals {
+    input = [ [ id:'test' ], // meta map
+              file(params.test_data['sarscov2']['illumina']['test_paired_end_sorted_bam'], checkIfExists: true),
+              file(params.test_data['sarscov2']['illumina']['test_paired_end_sorted_bam_bai'], checkIfExists: true),
+              file(params.test_data['sarscov2']['genome']['test_bed'], checkIfExists: true)
            ]
    fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
    fai = file(params.test_data['sarscov2']['genome']['genome_fasta_fai'], checkIfExists: true)

View file

@ -5,5 +5,14 @@
    - gatk4/splitncigarreads
  files:
    - path: output/gatk4/test.bam
-      md5sum: ceed15c0bd64ff5c38d3816905933b0b
+      md5sum: 436d8e31285c6b588bdd1c7f1d07f6f2
+    - path: output/gatk4/versions.yml
+
+- name: gatk4 splitncigarreads test_gatk4_splitncigarreads_intervals
+  command: nextflow run tests/modules/gatk4/splitncigarreads -entry test_gatk4_splitncigarreads_intervals -c tests/config/nextflow.config
+  tags:
+    - gatk4
+    - gatk4/splitncigarreads
+  files:
+    - path: output/gatk4/test.bam
+      md5sum: cd56e3225950f519fd47164cca60a0bb
    - path: output/gatk4/versions.yml

View file

@ -0,0 +1,21 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { UNTAR } from '../../../../modules/untar/main.nf'
include { KAIJU_KAIJU } from '../../../../modules/kaiju/kaiju/main.nf'
include { KAIJU_KAIJU2TABLE } from '../../../../modules/kaiju/kaiju2table/main.nf'
workflow test_kaiju_kaiju_single_end {
input = [
[ id:'test', single_end:true ], // meta map
file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true)
]
db = [ [], file(params.test_data['sarscov2']['genome']['kaiju_tar_gz'], checkIfExists: true) ]
taxon_rank = "species"
ch_db = UNTAR ( db )
KAIJU_KAIJU ( input, ch_db.untar.map{ it[1] } )
KAIJU_KAIJU2TABLE ( KAIJU_KAIJU.out.results, ch_db.untar.map{ it[1] }, taxon_rank )
}

View file

@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}

View file

@ -0,0 +1,9 @@
- name: kaiju kaiju2table test_kaiju_kaiju_single_end
command: nextflow run tests/modules/kaiju/kaiju2table -entry test_kaiju_kaiju_single_end -c tests/config/nextflow.config
tags:
- kaiju
- kaiju/kaiju2table
files:
- path: output/kaiju/test.txt
md5sum: 0d9f8fd36fcf2888296ae12632c5f0a8
- path: output/kaiju/versions.yml

View file

@ -12,7 +12,7 @@ workflow test_kraken2_kraken2_single_end {
    db = [ [], file(params.test_data['sarscov2']['genome']['kraken2_tar_gz'], checkIfExists: true) ]

    UNTAR ( db )
-    KRAKEN2_KRAKEN2 ( input, UNTAR.out.untar.map{ it[1] } )
+    KRAKEN2_KRAKEN2 ( input, UNTAR.out.untar.map{ it[1] }, true, false )
}

workflow test_kraken2_kraken2_paired_end {

@ -23,5 +23,15 @@ workflow test_kraken2_kraken2_paired_end {
    db = [ [], file(params.test_data['sarscov2']['genome']['kraken2_tar_gz'], checkIfExists: true) ]

    UNTAR ( db )
-    KRAKEN2_KRAKEN2 ( input, UNTAR.out.untar.map{ it[1] } )
+    KRAKEN2_KRAKEN2 ( input, UNTAR.out.untar.map{ it[1] }, true, false )
+}
+
+workflow test_kraken2_kraken2_classifyreads {
+    input = [ [ id:'test', single_end:true ], // meta map
+              [ file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true) ]
+            ]
+    db = [ [], file(params.test_data['sarscov2']['genome']['kraken2_tar_gz'], checkIfExists: true) ]
+    UNTAR ( db )
+    KRAKEN2_KRAKEN2 ( input, UNTAR.out.untar.map{ it[1] }, false, true )
}

View file

@ -1,29 +1,43 @@
-- name: kraken2 kraken2 single-end
-  command: nextflow run ./tests/modules/kraken2/kraken2 -entry test_kraken2_kraken2_single_end -c ./tests/config/nextflow.config -c ./tests/modules/kraken2/kraken2/nextflow.config
+- name: kraken2 kraken2 test_kraken2_kraken2_single_end
+  command: nextflow run tests/modules/kraken2/kraken2 -entry test_kraken2_kraken2_single_end -c tests/config/nextflow.config
  tags:
    - kraken2
    - kraken2/kraken2
  files:
    - path: output/kraken2/test.classified.fastq.gz
-      should_exist: true
-    - path: output/kraken2/test.unclassified.fastq.gz
-      should_exist: true
    - path: output/kraken2/test.kraken2.report.txt
      md5sum: 4227755fe40478b8d7dc8634b489761e
+    - path: output/kraken2/test.unclassified.fastq.gz
+    - path: output/kraken2/versions.yml
+      md5sum: 6e3ad947ac8dee841a89216071c181cc
+    - path: output/untar/versions.yml

-- name: kraken2 kraken2 paired-end
-  command: nextflow run ./tests/modules/kraken2/kraken2 -entry test_kraken2_kraken2_paired_end -c ./tests/config/nextflow.config -c ./tests/modules/kraken2/kraken2/nextflow.config
+- name: kraken2 kraken2 test_kraken2_kraken2_paired_end
+  command: nextflow run tests/modules/kraken2/kraken2 -entry test_kraken2_kraken2_paired_end -c tests/config/nextflow.config
  tags:
    - kraken2
    - kraken2/kraken2
  files:
    - path: output/kraken2/test.classified_1.fastq.gz
-      should_exist: true
    - path: output/kraken2/test.classified_2.fastq.gz
-      should_exist: true
-    - path: output/kraken2/test.unclassified_1.fastq.gz
-      should_exist: true
-    - path: output/kraken2/test.unclassified_2.fastq.gz
-      should_exist: true
    - path: output/kraken2/test.kraken2.report.txt
      md5sum: 4227755fe40478b8d7dc8634b489761e
+    - path: output/kraken2/test.unclassified_1.fastq.gz
+    - path: output/kraken2/test.unclassified_2.fastq.gz
+    - path: output/kraken2/versions.yml
+      md5sum: 604482fe7a4519f890fae9c8beb1bd6e
+    - path: output/untar/versions.yml
+
+- name: kraken2 kraken2 test_kraken2_kraken2_classifyreads
+  command: nextflow run tests/modules/kraken2/kraken2 -entry test_kraken2_kraken2_classifyreads -c tests/config/nextflow.config
+  tags:
+    - kraken2
+    - kraken2/kraken2
+  files:
+    - path: output/kraken2/test.kraken2.classifiedreads.txt
+      md5sum: e7a90531f0d8d777316515c36fe4cae0
+    - path: output/kraken2/test.kraken2.report.txt
+      md5sum: 4227755fe40478b8d7dc8634b489761e
+    - path: output/kraken2/versions.yml
+      md5sum: 3488c304259e83c5bea573403293fce9
+    - path: output/untar/versions.yml

View file

@ -9,8 +9,11 @@ workflow test_minimap2_align_single_end {
              [ file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true) ]
            ]
    fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
+    bam_format = true
+    cigar_paf_format = false
+    cigar_bam = false

-    MINIMAP2_ALIGN ( input, fasta )
+    MINIMAP2_ALIGN ( input, fasta, bam_format, cigar_paf_format, cigar_bam )
}

workflow test_minimap2_align_paired_end {

@ -19,6 +22,9 @@ workflow test_minimap2_align_paired_end {
                file(params.test_data['sarscov2']['illumina']['test_2_fastq_gz'], checkIfExists: true) ]
            ]
    fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
+    bam_format = true
+    cigar_paf_format = false
+    cigar_bam = false

-    MINIMAP2_ALIGN ( input, fasta )
+    MINIMAP2_ALIGN ( input, fasta, bam_format, cigar_paf_format, cigar_bam )
}

View file

@ -1,17 +1,17 @@
-- name: minimap2 align single-end
-  command: nextflow run ./tests/modules/minimap2/align -entry test_minimap2_align_single_end -c ./tests/config/nextflow.config -c ./tests/modules/minimap2/align/nextflow.config
+- name: minimap2 align test_minimap2_align_single_end
+  command: nextflow run tests/modules/minimap2/align -entry test_minimap2_align_single_end -c tests/config/nextflow.config
  tags:
    - minimap2
    - minimap2/align
  files:
-    - path: ./output/minimap2/test.paf
-      md5sum: 70e8cf299ee3ecd33e629d10c1f588ce
+    - path: output/minimap2/test.bam
+    - path: output/minimap2/versions.yml

-- name: minimap2 align paired-end
-  command: nextflow run ./tests/modules/minimap2/align -entry test_minimap2_align_paired_end -c ./tests/config/nextflow.config -c ./tests/modules/minimap2/align/nextflow.config
+- name: minimap2 align test_minimap2_align_paired_end
+  command: nextflow run tests/modules/minimap2/align -entry test_minimap2_align_paired_end -c tests/config/nextflow.config
  tags:
    - minimap2
    - minimap2/align
  files:
-    - path: ./output/minimap2/test.paf
-      md5sum: 5e7b55a26bf0ea3a2843423d3e0b9a28
+    - path: output/minimap2/test.bam
+    - path: output/minimap2/versions.yml

View file

@ -5,4 +5,4 @@
    - picard/collecthsmetrics
  files:
    # The file can't be md5'd consistently
-    - path: output/picard/test_collecthsmetrics.txt
+    - path: output/picard/test.CollectHsMetrics.coverage_metrics

View file

@ -42,7 +42,7 @@
    - path: output/rsem/rsem/genome.transcripts.fa
      md5sum: 050c521a2719c2ae48267c1e65218f29
    - path: output/rsem/rsem/genomeParameters.txt
-      md5sum: 2fe3a030e1706c3e8cd4df3818e6dd2f
+      md5sum: df5a456e3242520cc36e0083a6a7d9dd
    - path: output/rsem/rsem/sjdbInfo.txt
      md5sum: 5690ea9d9f09f7ff85b7fd47bd234903
    - path: output/rsem/rsem/sjdbList.fromGTF.out.tab

@ -63,4 +63,4 @@
    - path: output/rsem/test.stat/test.theta
      md5sum: de2e4490c98cc5383a86ae8225fd0a28
    - path: output/rsem/test.transcript.bam
-      md5sum: 7846491086c478858419667d60f18edd
+      md5sum: ed681d39f5700ffc74d6321525330d93

View file

@ -0,0 +1,17 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { SAMTOOLS_BAMTOCRAM } from '../../../../modules/samtools/bamtocram/main.nf'
workflow test_samtools_bamtocram {
input = [ [ id:'test', single_end:false ], // meta map
file(params.test_data['sarscov2']['illumina']['test_paired_end_sorted_bam'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_paired_end_sorted_bam_bai'], checkIfExists: true)]
fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
fai = file(params.test_data['sarscov2']['genome']['genome_fasta_fai'], checkIfExists: true)
SAMTOOLS_BAMTOCRAM ( input, fasta, fai )
}

View file

@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}

View file

@ -0,0 +1,9 @@
- name: samtools bamtocram test_samtools_bamtocram
command: nextflow run ./tests/modules/samtools/bamtocram -entry test_samtools_bamtocram -c ./tests/config/nextflow.config -c ./tests/modules/samtools/bamtocram/nextflow.config
tags:
- samtools/bamtocram
- samtools
files:
- path: output/samtools/test.cram
- path: output/samtools/test.cram.crai
- path: output/samtools/versions.yml

View file

@ -0,0 +1,13 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { SAMTOOLS_COLLATEFASTQ } from '../../../../modules/samtools/collatefastq/main.nf'
workflow test_samtools_collatefastq {
input = [ [ id:'test', single_end:false ], // meta map
file(params.test_data['sarscov2']['illumina']['test_paired_end_bam'], checkIfExists: true)
]
SAMTOOLS_COLLATEFASTQ ( input )
}

View file

@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}

View file

@ -0,0 +1,14 @@
- name: samtools collatefastq test_samtools_collatefastq
command: nextflow run ./tests/modules/samtools/collatefastq -entry test_samtools_collatefastq -c ./tests/config/nextflow.config -c ./tests/modules/samtools/collatefastq/nextflow.config
tags:
- samtools
- samtools/collatefastq
files:
- path: output/samtools/test_1.fq.gz
md5sum: 829732de4e937edca90f27b07e5b501a
- path: output/samtools/test_2.fq.gz
md5sum: ef27d3809e495620fd93df894280c03a
- path: output/samtools/test_other.fq.gz
md5sum: 709872fc2910431b1e8b7074bfe38c67
- path: output/samtools/test_singleton.fq.gz
md5sum: 709872fc2910431b1e8b7074bfe38c67

View file

@ -6,7 +6,8 @@ include { SAMTOOLS_VIEW } from '../../../../modules/samtools/view/main.nf'

workflow test_samtools_view {
    input = [ [ id:'test', single_end:false ], // meta map
-              file(params.test_data['sarscov2']['illumina']['test_paired_end_bam'], checkIfExists: true)
+              file(params.test_data['sarscov2']['illumina']['test_paired_end_bam'], checkIfExists: true),
+              []
            ]

    SAMTOOLS_VIEW ( input, [] )

View file

@ -5,7 +5,6 @@ nextflow.enable.dsl = 2
include { EXPANSIONHUNTER } from '../../../modules/expansionhunter/main.nf'
include { STRANGER } from '../../../modules/stranger/main.nf'

-workflow test_stranger {
input = [ [ id:'test', gender:'male' ], // meta map
          file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_bam'], checkIfExists: true),

@ -14,6 +13,18 @@ workflow test_stranger {
fasta = file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true)
variant_catalog = file(params.test_data['homo_sapiens']['genome']['repeat_expansions'], checkIfExists: true)

+workflow test_stranger {
    EXPANSIONHUNTER ( input, fasta, variant_catalog )
-    STRANGER ( EXPANSIONHUNTER.out.vcf )
+    STRANGER ( EXPANSIONHUNTER.out.vcf, variant_catalog )
+}
+
+workflow test_stranger_without_optional_variant_catalog {
+    EXPANSIONHUNTER ( input, fasta, variant_catalog )
+    STRANGER ( EXPANSIONHUNTER.out.vcf, [] )
+}
+
+workflow test_stranger_without_optional_variant_catalog_stubs {
+    EXPANSIONHUNTER ( input, fasta, variant_catalog )
+    STRANGER ( EXPANSIONHUNTER.out.vcf, [] )
}

View file

@ -8,6 +8,30 @@
    - path: output/expansionhunter/versions.yml
      md5sum: f3962a6eecfddf9682414c0f605a885a
    - path: output/stranger/test.vcf.gz
-      md5sum: bbe15159195681d5c18596d3ad85c78f
+      md5sum: 68b0ca1319851134ffa8793a4704dc11
    - path: output/stranger/versions.yml
      md5sum: 5ec35fd835fb1be50bc3e7c004310fc0

+- name: stranger test_stranger_without_optional_variant_catalog
+  command: nextflow run tests/modules/stranger -entry test_stranger_without_optional_variant_catalog -c tests/config/nextflow.config
+  tags:
+    - stranger
+  files:
+    - path: output/expansionhunter/test.vcf
+      md5sum: cfd4a1d35c0e469b99eb6aaa6d22de76
+    - path: output/expansionhunter/versions.yml
+      md5sum: c95af9e6d8cd9bd2ce1090ca4e7a6020
+    - path: output/stranger/test.vcf.gz
+      md5sum: bbe15159195681d5c18596d3ad85c78f
+    - path: output/stranger/versions.yml
+      md5sum: 8558542a007e90ea5dcdceed3f12585d
+
+- name: stranger test_stranger_without_optional_variant_catalog_stubs
+  command: nextflow run tests/modules/stranger -entry test_stranger_without_optional_variant_catalog -c tests/config/nextflow.config -stub-run
+  tags:
+    - stranger
+  files:
+    - path: output/expansionhunter/test.vcf
+    - path: output/expansionhunter/versions.yml
+    - path: output/stranger/test.vcf.gz
+    - path: output/stranger/versions.yml

View file

@ -2,7 +2,7 @@
nextflow.enable.dsl = 2

-include { STRINGTIE } from '../../../../modules/stringtie/stringtie/main.nf'
+include { STRINGTIE_STRINGTIE } from '../../../../modules/stringtie/stringtie/main.nf'
include { STRINGTIE_MERGE } from '../../../../modules/stringtie/merge/main.nf'

/*

@ -15,8 +15,8 @@ workflow test_stringtie_forward_merge {
            ]
    annotation_gtf = file(params.test_data['homo_sapiens']['genome']['genome_gtf'], checkIfExists: true)

-    STRINGTIE ( input, annotation_gtf )
-    STRINGTIE
+    STRINGTIE_STRINGTIE ( input, annotation_gtf )
+    STRINGTIE_STRINGTIE
        .out
        .transcript_gtf
        .map { it -> it[1] }

@ -35,8 +35,8 @@ workflow test_stringtie_reverse_merge {
            ]
    annotation_gtf = file(params.test_data['homo_sapiens']['genome']['genome_gtf'], checkIfExists: true)

-    STRINGTIE ( input, annotation_gtf )
-    STRINGTIE
+    STRINGTIE_STRINGTIE ( input, annotation_gtf )
+    STRINGTIE_STRINGTIE
        .out
        .transcript_gtf
        .map { it -> it[1] }

View file

@ -5,7 +5,7 @@
    - stringtie/merge
  files:
    - path: output/stringtie/stringtie.merged.gtf
-      md5sum: 9fab7049ef2eafdea246fc787d1def40
+      md5sum: d959eb2fab0db48ded7275e0a2e83c05
    - path: output/stringtie/test.ballgown/e2t.ctab
      md5sum: 9ae42e056c955a88a883e5e917840d77
    - path: output/stringtie/test.ballgown/e_data.ctab

@ -17,11 +17,10 @@
    - path: output/stringtie/test.ballgown/t_data.ctab
      md5sum: 92a98902784e7406ffe054d2adbabc7c
    - path: output/stringtie/test.coverage.gtf
-      md5sum: d41d8cd98f00b204e9800998ecf8427e
    - path: output/stringtie/test.gene.abundance.txt
-      md5sum: 9708811bcefe0f6384293d6f419f3250
+      md5sum: 8bcd8e2730ed3337e2730186dbc184f3
    - path: output/stringtie/test.transcripts.gtf
-      md5sum: 0e42709bfe30c2c7f2574ba664f5fa9f
+      md5sum: a914bd55b68a4b5f607738b17861e362

- name: stringtie merge test_stringtie_reverse_merge
  command: nextflow run ./tests/modules/stringtie/merge -entry test_stringtie_reverse_merge -c ./tests/config/nextflow.config -c ./tests/modules/stringtie/merge/nextflow.config

@ -30,7 +29,7 @@
    - stringtie/merge
  files:
    - path: output/stringtie/stringtie.merged.gtf
-      md5sum: afc461bb3cbc368f268a7a45c1b54497
+      md5sum: 6da479298d73d5b3216d4e1576a2bdf4
    - path: output/stringtie/test.ballgown/e2t.ctab
      md5sum: 9ae42e056c955a88a883e5e917840d77
    - path: output/stringtie/test.ballgown/e_data.ctab

@ -42,8 +41,7 @@
    - path: output/stringtie/test.ballgown/t_data.ctab
      md5sum: 92a98902784e7406ffe054d2adbabc7c
    - path: output/stringtie/test.coverage.gtf
-      md5sum: d41d8cd98f00b204e9800998ecf8427e
    - path: output/stringtie/test.gene.abundance.txt
-      md5sum: 94b85145d60ab1b80a7f0f6cf08418b0
+      md5sum: f289f41b3ba1b9f0aa05d14408f1a5da
    - path: output/stringtie/test.transcripts.gtf
-      md5sum: 3196e3d50fd461aae6408e0a70acae68
+      md5sum: 9dcdc9577c0fdbb25089eda210267546

View file

@ -2,7 +2,7 @@
nextflow.enable.dsl = 2

-include { STRINGTIE } from '../../../../modules/stringtie/stringtie/main.nf'
+include { STRINGTIE_STRINGTIE } from '../../../../modules/stringtie/stringtie/main.nf'

//
// Test with forward strandedness
//

@ -13,7 +13,7 @@ workflow test_stringtie_forward {
            ]
    annotation_gtf = file(params.test_data['sarscov2']['genome']['genome_gtf'], checkIfExists: true)

-    STRINGTIE ( input, annotation_gtf )
+    STRINGTIE_STRINGTIE ( input, annotation_gtf )
}

//

@ -26,5 +26,5 @@ workflow test_stringtie_reverse {
            ]
    annotation_gtf = file(params.test_data['sarscov2']['genome']['genome_gtf'], checkIfExists: true)

-    STRINGTIE ( input, annotation_gtf )
+    STRINGTIE_STRINGTIE ( input, annotation_gtf )
}

View file

@ -8,7 +8,6 @@
    - path: ./output/stringtie/test.gene.abundance.txt
      md5sum: 7d8bce7f2a922e367cedccae7267c22e
    - path: ./output/stringtie/test.coverage.gtf
-      md5sum: d41d8cd98f00b204e9800998ecf8427e
    - path: ./output/stringtie/test.ballgown/e_data.ctab
      md5sum: 6b4cf69bc03f3f69890f972a0e8b7471
    - path: ./output/stringtie/test.ballgown/i_data.ctab

@ -30,7 +29,6 @@
    - path: ./output/stringtie/test.gene.abundance.txt
      md5sum: 7385b870b955dae2c2ab78a70cf05cce
    - path: ./output/stringtie/test.coverage.gtf
-      md5sum: d41d8cd98f00b204e9800998ecf8427e
    - path: ./output/stringtie/test.ballgown/e_data.ctab
      md5sum: 879b6696029d19c4737b562e9d149218
    - path: ./output/stringtie/test.ballgown/i_data.ctab

View file

@ -4,10 +4,18 @@ nextflow.enable.dsl = 2

include { TABIX_BGZIP } from '../../../../modules/tabix/bgzip/main.nf'

-workflow test_tabix_bgzip {
+workflow test_tabix_bgzip_compress {
    input = [ [ id:'test' ], // meta map
              [ file(params.test_data['sarscov2']['illumina']['test_vcf'], checkIfExists: true) ]
            ]

    TABIX_BGZIP ( input )
}
+
+workflow test_tabix_bgzip_decompress {
+    input = [ [ id:'test' ], // meta map
+              [ file(params.test_data['sarscov2']['genome']['test_bed_gz'], checkIfExists: true) ]
+            ]
+
+    TABIX_BGZIP ( input )
+}

View file

@ -1,8 +1,16 @@
-- name: tabix bgzip
-  command: nextflow run ./tests/modules/tabix/bgzip -entry test_tabix_bgzip -c ./tests/config/nextflow.config -c ./tests/modules/tabix/bgzip/nextflow.config
+- name: tabix bgzip compress
+  command: nextflow run ./tests/modules/tabix/bgzip -entry test_tabix_bgzip_compress -c ./tests/config/nextflow.config -c ./tests/modules/tabix/bgzip/nextflow.config
  tags:
    - tabix
    - tabix/bgzip
  files:
    - path: ./output/tabix/test.vcf.gz
      md5sum: fc178eb342a91dc0d1d568601ad8f8e2
+
+- name: tabix bgzip decompress
+  command: nextflow run ./tests/modules/tabix/bgzip -entry test_tabix_bgzip_decompress -c ./tests/config/nextflow.config -c ./tests/modules/tabix/bgzip/nextflow.config
+  tags:
+    - tabix
+    - tabix/bgzip
+  files:
+    - path: ./output/tabix/test.bed
+      md5sum: fe4053cf4de3aebbdfc3be2efb125a74

View file

@ -9,6 +9,7 @@
    - path: output/tiddit/test.signals.tab
      md5sum: dab4b2fec4ddf8eb1c23005b0770150e
    - path: output/tiddit/test.vcf
+      md5sum: bdce14ae8292bf3deb81f6f255baf859

- name: tiddit sv no ref
  command: nextflow run ./tests/modules/tiddit/sv -entry test_tiddit_sv_no_ref -c ./tests/config/nextflow.config -c ./tests/modules/tiddit/sv/nextflow.config

@ -21,3 +22,4 @@
    - path: output/tiddit/test.signals.tab
      md5sum: dab4b2fec4ddf8eb1c23005b0770150e
    - path: output/tiddit/test.vcf
+      md5sum: 3d0e83a8199b2bdb81cfe3e6b12bf64b