mirror of https://github.com/MillironX/nf-core_modules.git
synced 2024-12-22 11:08:17 +00:00

Commit edd508487b: 39 changed files with 288 additions and 233 deletions
.github/markdownlint.yml (vendored, new file, 11 lines)

@@ -0,0 +1,11 @@
# Markdownlint configuration file
default: true
line-length: false
no-multiple-blanks: 0
blanks-around-headers: false
blanks-around-lists: false
header-increment: false
no-duplicate-header:
    siblings_only: true
ul-indent:
    indent: 4
.github/workflows/lint-code.yml (vendored, new file, 50 lines)

@@ -0,0 +1,50 @@
name: Lint Code
on: [push, pull_request]

jobs:
  Markdown:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - uses: actions/setup-node@v1
        with:
          node-version: '10'

      - name: Install markdownlint
        run: npm install -g markdownlint-cli

      - name: Run Markdownlint
        run: markdownlint ${GITHUB_WORKSPACE} -c ${GITHUB_WORKSPACE}/.github/markdownlint.yml

  EditorConfig:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - uses: actions/setup-node@v1
        with:
          node-version: '10'

      - name: Install ECLint
        run: npm install -g eclint

      - name: Run ECLint check
        run: eclint check $(git ls-files | grep -v test)

  YAML:
    runs-on: ubuntu-latest
    steps:
      - name: Check out repository
        uses: actions/checkout@v1

      - name: Install NodeJS
        uses: actions/setup-node@v1
        with:
          node-version: '10'

      - name: Install yaml-lint
        run: npm install -g yaml-lint

      - name: Run yaml-lint
        run: yamllint $(find ${GITHUB_WORKSPACE} -type f -name "*.yaml" -or -name "*.yml")
.gitmodules (vendored)

@@ -1,3 +1,3 @@
[submodule "test-datasets"]
    path = test-datasets
    url = https://github.com/nf-core/test-datasets.git
README.md

@@ -5,6 +5,7 @@
A repository for hosting Nextflow [`DSL2`](https://www.nextflow.io/docs/edge/dsl2.html) module files containing tool-specific process definitions and their associated documentation.

## Table of contents

* [Using existing modules](#using-existing-modules)
    * [Configuration and parameters](#configuration-and-parameters)
    * [Offline usage](#offline-usage)

@@ -20,7 +21,7 @@ The features offered by Nextflow DSL 2 can be used in various ways depending on

* *Module*: A `process` that can be used within different pipelines and is as atomic as possible, i.e. it cannot be split into another module. An example would be a module file containing the process definition for a single tool such as `FastQC`. This repository has been created to host only atomic module files, which should be added to the `tools` sub-directory along with the required documentation, software and tests.
* *Sub-workflow*: A chain of multiple modules that offers a higher level of functionality within the context of a pipeline, for example a sub-workflow to run multiple QC tools with FastQ files as input. Sub-workflows should be shipped with the pipeline implementation and, if required, shared among different pipelines directly from there. As it stands, this repository will not host sub-workflows.
* *Workflow*: What DSL 1 users would consider an end-to-end pipeline, for example from one or more inputs to a series of outputs. This can be implemented either as a large monolithic script, as with DSL 1, or as a combination of DSL 2 modules and sub-workflows.

## Using existing modules
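In practice, using one of these atomic modules comes down to a DSL2 `include` statement. A minimal, hypothetical sketch follows (the module path, file glob and parameter values are illustrative, and the `include { }` syntax assumes a Nextflow release with DSL2 preview support):

nextflow.preview.dsl = 2

// Hypothetical usage sketch: FASTQC here is the module version shipped in this commit,
// and the path is illustrative.
include { FASTQC } from './tools/fastqc/main.nf'

workflow {
    // fromFilePairs emits (name, files) tuples, matching `tuple val(name), path(reads)`
    reads = Channel.fromFilePairs('data/*_{1,2}.fastq.gz')
    FASTQC(reads, params.outdir, '', false)
}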
test-datasets (submodule)

@@ -1 +1 @@
-Subproject commit aae85a5c9c72238959108212481ce83bae569709
+Subproject commit ddbd0c4cf7f1721c78673c4dcc91fcd7940e67f8
bowtie2 main.nf

@@ -2,51 +2,51 @@ nextflow.preview.dsl=2
params.genome = ''

process BOWTIE2 {
    // Depending on the genome used, one might want/need to adjust the memory settings.
    // For the E. coli test data this is probably not required.

    // label 'bigMem'
    // label 'multiCore'

    input:
    tuple val(name), path(reads)
    val (outdir)
    val (bowtie2_args)
    val (verbose)

    output:
    path "*bam", emit: bam
    path "*stats.txt", emit: stats

    publishDir "$outdir/bowtie2",
        mode: "copy", overwrite: true

    script:
    if (verbose){
        println ("[MODULE] BOWTIE2 ARGS: " + bowtie2_args)
    }

    cores = 4
    readString = ""

    // Options we add are
    bowtie2_options = bowtie2_args
    bowtie2_options += " --no-unal " // We don't need unaligned reads in the BAM file

    // single-end / paired-end distinction. Might also be handled via params.single_end
    if (reads instanceof List) {
        readString = "-1 " + reads[0] + " -2 " + reads[1]
    }
    else {
        readString = "-U " + reads
    }

    index = params.genome["bowtie2"]
    bowtie2_name = name + "_" + params.genome["name"]

    """
    bowtie2 -x ${index} -p ${cores} ${bowtie2_options} ${readString} 2>${bowtie2_name}_bowtie2_stats.txt | samtools view -bS -F 4 -F 8 -F 256 - > ${bowtie2_name}_bowtie2.bam
    """
}
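The `reads instanceof List` check above is plain Groovy: when a `path` input stages a single file the process sees one path object, and a list when it stages several, which is what separates single-end from paired-end input here. A standalone sketch of the same branch, with made-up file names:

// Standalone Groovy illustration of the single-end / paired-end branch above
def bowtie2ReadString(reads) {
    reads instanceof List ? "-1 ${reads[0]} -2 ${reads[1]}" : "-U ${reads}"
}

assert bowtie2ReadString(['s1_R1.fq.gz', 's1_R2.fq.gz']) == '-1 s1_R1.fq.gz -2 s1_R2.fq.gz'
assert bowtie2ReadString('s1.fq.gz') == '-U s1.fq.gz'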
bowtie2 meta.yml

@@ -1,4 +1,4 @@
name: Bowtie 2
description: Ultrafast alignment to reference genome
keywords:
    - Alignment

@@ -28,10 +28,10 @@ output:
    - report:
        type: file
        description: mapping statistics report
-       pattern: *bowtie2_stats.txt
+       pattern: "*bowtie2_stats.txt"
    - alignment:
        type: file
        description: alignment file in BAM format
-       pattern: *bowtie2.bam
+       pattern: "*bowtie2.bam"
authors:
-   - @FelixKrueger
+   - "@FelixKrueger"
bwa index main.nf

@@ -13,4 +13,4 @@ process bwa_index {
    """
    bwa index ${fasta}
    """
}
bwa index meta.yml

@@ -14,12 +14,12 @@ input:
    - input:
        type: file
        description: Input fasta file
-       pattern: *.{fasta,fa}
+       pattern: "*.{fasta,fa}"
output:
    -
    - index:
        type: file
        description: bwa indexes file
-       pattern: *.{fasta,fa}.{amb,ann,bwt,pac,sa}
+       pattern: "*.{fasta,fa}.{amb,ann,bwt,pac,sa}"
authors:
-   - @maxulysse
+   - "@maxulysse"
bwa mem Dockerfile

@@ -1,9 +1,7 @@
FROM nfcore/base
LABEL authors="Jeremy Guntoro" \
      description="Docker image containing all requirements for nf-core/modules/bwa/mem module"

COPY environment.yml /
RUN conda env create -f /environment.yml && conda clean -a
ENV PATH /opt/conda/envs/nf-core-bwa-mem/bin:$PATH
-
-
bwa mem meta.yml

@@ -19,11 +19,11 @@ input:
    - reads:
        type: file
        description: Input fastq file
-       pattern: *.{fastq,fq}
+       pattern: "*.{fastq,fq}"
    - index:
        type: file
        description: bwa indexes file
-       pattern: *.{amb,ann,bwt,pac,sa}
+       pattern: "*.{amb,ann,bwt,pac,sa}"
    - prefix:
        type: val
        description: bwa index prefix, equivalent to index file names without extensions. Usually the reference genome file name unless otherwise specified.

@@ -32,11 +32,11 @@ output:
    - bam:
        type: file
        description: Output bam file
-       pattern: *.bam
+       pattern: "*.bam"
    - bamindex:
        type: file
        description: Output bam index file
-       pattern: *.bai
+       pattern: "*.bai"

authors:
-   - @jeremy1805
+   - "@jeremy1805"
cutadapt main.nf

@@ -13,27 +13,27 @@ process cutadapt {
    forward_fq = "trimmed_1.fastq"
    reverse_fq = "trimmed_2.fastq"

    if (params.singleEnd) {
        processing = """
        cutadapt \
            -j ${task.cpus} \
            -q $params.cutadapt_min_quality \
            --minimum-length $params.cutadapt_min_length \
            --output ${forward_fq} \
            ${reads}
        """
    } else {
        processing = """
        cutadapt \
            -j ${task.cpus} \
            -q $params.cutadapt_min_quality \
            --minimum-length $params.cutadapt_min_length \
            --pair-filter=any \
            --output ${forward_fq} \
            --paired-output ${reverse_fq} ${reads}
        """
    }
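Unlike the bowtie2 module, this process pulls its thresholds from pipeline-level params rather than taking them as inputs. A hypothetical nextflow.config fragment supplying them (the values are illustrative, not defaults from this repository):

// Hypothetical nextflow.config fragment for the cutadapt process above
params {
    singleEnd            = false
    cutadapt_min_quality = 20
    cutadapt_min_length  = 50
}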
cutadapt meta.yml

@@ -9,10 +9,10 @@ tools:
    description: |
        Cutadapt finds and removes adapter sequences, primers, poly-A tails and other types of unwanted sequence
        from your high-throughput sequencing reads.

        Cleaning your data in this way is often required: Reads from small-RNA sequencing contain the 3’
        sequencing adapter because the read is longer than the molecule that is sequenced. Amplicon reads
        start with a primer sequence. Poly-A tails are useful for pulling out RNA from your sample, but
        often you don’t want them to be in your reads.
    homepage: https://cutadapt.readthedocs.io/en/stable/
    documentation: https://cutadapt.readthedocs.io/en/stable/

@@ -33,4 +33,4 @@ output:
        type: file
        description: trimmed FastQ file, or pair of files
authors:
-   - @piotr-faba-ardigen
+   - "@piotr-faba-ardigen"
fastq_screen main.nf

@@ -1,38 +1,38 @@
nextflow.preview.dsl=2

process FASTQ_SCREEN {

    // depending on the number of genomes and the type of genome (e.g. plants!), memory needs to be ample!
    // label 'bigMem'
    // label 'multiCore'

    input:
    tuple val(name), path(reads)
    val (outputdir)
    // fastq_screen_args are best passed in to the workflow in the following manner:
    // --fastq_screen_args="--subset 200000 --force"
    val (fastq_screen_args)
    val (verbose)

    output:
    path "*png", emit: png
    path "*html", emit: html
    path "*txt", emit: report

    publishDir "$outputdir",
        mode: "link", overwrite: true

    script:
    println(name)
    println(reads)
    println(outputdir)
    if (verbose){
        println ("[MODULE] FASTQ SCREEN ARGS: "+ fastq_screen_args)
    }

    """
    module load fastq_screen
    fastq_screen $fastq_screen_args $reads
    """
}
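As the comment in the module says, the tool arguments arrive from the command line, e.g. nextflow run main.nf --fastq_screen_args="--subset 200000 --force". A hypothetical call site that simply forwards them (channel and param names are illustrative):

// Hypothetical wiring: forward the CLI-supplied args into the module
FASTQ_SCREEN(reads, params.outdir, params.fastq_screen_args, params.verbose)
FASTQ_SCREEN.out.report.view() // the *txt screening reports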
fastq_screen meta.yml

@@ -25,7 +25,7 @@ output:
    - report:
        type: file
        description: FastQ Screen report
-       pattern: *_screen.{txt,html,png}
-       optional_pattern: *_screen.bisulfite_orientation.png
+       pattern: "*_screen.{txt,html,png}"
+       optional_pattern: "*_screen.bisulfite_orientation.png"
authors:
-   - @FelixKrueger
+   - "@FelixKrueger"
fastqc Dockerfile

@@ -1,6 +1,6 @@
FROM nfcore/base:1.7
LABEL authors="phil.ewels@scilifelab.se" \
      description="Docker image for nf-core modules fastqc"

# foobar
COPY environment.yml /
fastqc main.nf

@@ -1,37 +1,37 @@
nextflow.preview.dsl = 2

process FASTQC {

    // tag "FastQC - $sample_id"

    input:
    tuple val(name), path(reads)
    val (outputdir)
    // fastqc_args are best passed into the workflow in the following manner:
    // --fastqc_args="--nogroup -a custom_adapter_file.txt"
    val (fastqc_args)
    val (verbose)

    output:
    tuple val(name), path ("*fastqc*"), emit: all
    path "*.zip", emit: report // e.g. for MultiQC later

    // container 'quay.io/biocontainers/fastqc:0.11.8--2'

    publishDir "$outputdir",
        mode: "copy", overwrite: true

    script:
    if (verbose){
        println ("[MODULE] FASTQC ARGS: " + fastqc_args)
    }

    """
    module load fastqc
    fastqc $fastqc_args -q -t 2 $reads

    fastqc --version &> fastqc.version.txt
    """
}
fastqc meta.yml

@@ -27,8 +27,7 @@ output:
    - report:
        type: file
        description: FastQC report
-       pattern: *_fastqc.{zip,html}
+       pattern: "*_fastqc.{zip,html}"
authors:
-   -
-   - @ewels
-   - @FelixKrueger
+   - "@ewels"
+   - "@FelixKrueger"
gatk dict main.nf

@@ -16,4 +16,4 @@ process gatk_dict {
        --REFERENCE ${fasta} \
        --OUTPUT ${fasta.baseName}.dict
    """
}
gatk dict meta.yml

@@ -14,12 +14,12 @@ input:
    - input:
        type: file
        description: Input fasta file
-       pattern: *.{fasta,fa}
+       pattern: "*.{fasta,fa}"
output:
    -
    - dict:
        type: file
        description: gatk dictionary file
-       pattern: *.{fasta,fa}.{dict}
+       pattern: "*.{fasta,fa}.{dict}"
authors:
-   - @maxulysse
+   - "@maxulysse"
hisat2 main.nf

@@ -15,17 +15,17 @@ process HISAT2 {

    output:
    path "*bam", emit: bam
    path "*stats.txt", emit: stats

    publishDir "$outdir/hisat2",
        mode: "copy", overwrite: true

    script:
    if (verbose){
        println ("[MODULE] HISAT2 ARGS: " + hisat2_args)
    }

    cores = 4
    readString = ""
    hisat_options = hisat2_args

@@ -41,7 +41,7 @@ process HISAT2 {
        readString = "-U "+reads
    }
    index = params.genome["hisat2"]

    splices = ''
    if (params.genome.containsKey("hisat2_splices")){
        splices = " --known-splicesite-infile " + params.genome["hisat2_splices"]
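Like BOWTIE2 above, this module indexes into a params.genome map for its index and splice-site locations. A hypothetical shape for that map follows; only the keys referenced in the code are real, while the genome name and paths are illustrative:

// Hypothetical params.genome map consumed by the aligner modules above
params.genome = [
    name          : 'GRCh38',
    bowtie2       : '/refs/GRCh38/bowtie2/GRCh38',
    hisat2        : '/refs/GRCh38/hisat2/GRCh38',
    hisat2_splices: '/refs/GRCh38/hisat2_splice_sites.txt'
]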
hisat2 meta.yml

@@ -1,4 +1,4 @@
name: HISAT2
description: Graph-based alignment of next generation sequencing reads to a population of genomes
keywords:
    - Alignment

@@ -28,10 +28,10 @@ output:
    - report:
        type: file
        description: mapping statistics report
-       pattern: *hisat2_stats.txt
+       pattern: "*hisat2_stats.txt"
    - alignment:
        type: file
        description: alignment file in BAM format
-       pattern: *hisat2.bam
+       pattern: "*hisat2.bam"
authors:
-   - @FelixKrueger
+   - "@FelixKrueger"
tabix meta.yml

@@ -15,12 +15,12 @@ input:
    - input:
        type: file
        description: Input vcf.gz file
-       pattern: *.{vcf.gz}
+       pattern: "*.{vcf.gz}"
output:
    -
    - index:
        type: file
        description: tabix index file
-       pattern: *.{vcf.gz.tbi}
+       pattern: "*.{vcf.gz.tbi}"
authors:
-   - @maxulysse
+   - "@maxulysse"
multiqc main.nf

@@ -1,31 +1,31 @@
nextflow.preview.dsl=2

process MULTIQC {

    // tag "FastQC - $sample_id"

    input:
    path (file)
    val (outdir)
    val (multiqc_args)
    // multiqc_args are best passed into the workflow in the following manner:
    // --multiqc_args="--exclude STAR --title custom_report_title"
    val (verbose)

    output:
    path "*html", emit: html

    publishDir "${outdir}/multiqc",
        mode: "copy", overwrite: true

    script:
    if (verbose){
        println ("[MODULE] MULTIQC ARGS: " + multiqc_args)
    }

    """
    multiqc $multiqc_args -x work .
    """
}
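The `emit: report` channel on FASTQC above exists precisely so its zipped reports can be gathered here. A minimal, hypothetical chaining sketch (channel and param names illustrative):

// Hypothetical chaining: collect every FastQC zip (emit: report) into one MultiQC run
FASTQC(reads, params.outdir, '', false)
MULTIQC(FASTQC.out.report.collect(), params.outdir, '', false)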
multiqc meta.yml

@@ -21,6 +21,6 @@ output:
    - multiqc_report:
        type: file
        description: MultiQC report
-       pattern: *multiqc*.html
+       pattern: "*multiqc*.html"
authors:
-   - @FelixKrueger
+   - "@FelixKrueger"
samtools Dockerfile

@@ -1,6 +1,6 @@
FROM nfcore/base:1.7
LABEL authors="phil.ewels@scilifelab.se" \
      description="Docker image for nf-core modules samtools"

# foobar
COPY environment.yml /
samtools faidx meta.yml

@@ -16,12 +16,12 @@ input:
    - input:
        type: file
        description: Input fasta file
-       pattern: *.{fasta,fa}
+       pattern: "*.{fasta,fa}"
output:
    -
    - faidx:
        type: file
        description: samtools index fasta file
-       pattern: *.fasta.fai
+       pattern: "*.fasta.fai"
authors:
-   - @maxulysse
+   - "@maxulysse"
samtools index meta.yml

@@ -16,12 +16,12 @@ input:
    - input:
        type: file
        description: Input BAM or CRAM file
-       pattern: *.{bam,cram}
+       pattern: "*.{bam,cram}"
output:
    -
    - index:
        type: file
        description: BAM or CRAM index file
-       pattern: *.{bai}
+       pattern: "*.{bai}"
authors:
-   - @ewels
+   - "@ewels"
samtools sort meta.yml

@@ -16,12 +16,12 @@ input:
    - input:
        type: file
        description: Input BAM or CRAM file
-       pattern: *.{bam,cram}
+       pattern: "*.{bam,cram}"
output:
    -
    - sorted_file:
        type: file
        description: Sorted BAM or CRAM file
-       pattern: *.{bam,cram}
+       pattern: "*.{bam,cram}"
authors:
-   - @ewels
+   - "@ewels"
shovill main.nf

@@ -3,7 +3,7 @@ process shovill {
    tag { shovill }

    publishDir "${params.outdir}", pattern: '*.fasta', mode: 'copy'

    container "quay.io/biocontainers/shovill:1.0.9--0"

    input:

@@ -11,7 +11,7 @@ process shovill {

    output:
    path("${sample_id}.fasta")

    script:
    """
    shovill --R1 ${forward} --R2 ${reverse} --outdir shovill_out
shovill meta.yml

@@ -27,4 +27,4 @@ output:
        description: fasta file
        pattern: ${sample_id}.fasta
authors:
-   - @annacprice
+   - "@annacprice"
tcoffee main.nf

@@ -14,4 +14,3 @@ process tcoffee {
    t_coffee -seq $fasta -outfile ${fasta}.aln
    """
}
-
tcoffee meta.yml

@@ -16,13 +16,13 @@ input:
    - fasta:
        type: path
        description: Input fasta file
-       pattern: *.{fasta,fa,tfa}
+       pattern: "*.{fasta,fa,tfa}"
output:
    -
    - alignment:
        type: file
        description: tcoffee alignment file
-       pattern: *.aln
+       pattern: "*.aln"

authors:
-   - @JoseEspinosa
+   - "@JoseEspinosa"
trim_galore Dockerfile

@@ -1,6 +1,6 @@
FROM nfcore/base:1.7
LABEL authors="phil.ewels@scilifelab.se" \
      description="Docker image for nf-core modules trimgalore"

# foobar
COPY environment.yml /
trim_galore main.nf

@@ -13,43 +13,43 @@ params.three_prime_clip_r1 = 0
params.three_prime_clip_r2 = 0

process TRIM_GALORE {

    // container 'quay.io/biocontainers/trim-galore:0.6.5--0' // maybe later
    // tag "$sample_id"

    input:
    tuple val (name), path (reads)
    val (outdir)
    val (trim_galore_args)
    val (verbose)

    output:
    tuple val(name), path ("*fq.gz"), emit: reads
    path "*trimming_report.txt", optional: true, emit: report

    // Trimming reports are not generated for e.g. --hardtrim5, --clock etc
    // saveAs: {filename ->
    //     else if (filename.indexOf("trimming_report.txt") > 0) "logs/$filename"
    //     else filename
    // }

    publishDir "${outdir}/trim_galore",
        mode: "copy", overwrite: true

    script:
    if (verbose){
        println ("[MODULE] TRIM GALORE ARGS: " + trim_galore_args)
    }

    trim_galore_args += " --gzip " // we like small files

    pairedString = 0
    if (reads instanceof List) {
        pairedString = 1
        trim_galore_args += " --paired "
    }

    if (params.clip_r1 > 0){
        trim_galore_args += " --clip_r1 ${params.clip_r1} "
    }

@@ -62,12 +62,12 @@ process TRIM_GALORE {
    if (params.three_prime_clip_r2 > 0){
        trim_galore_args += " --three_prime_clip_r2 ${params.three_prime_clip_r2} "
    }

    if (params.trim_nextseq > 0){
        trim_galore_args += " --nextseq ${params.trim_nextseq} "
    }

    // Pre-set parameters for certain bisulfite-seq applications
    if (params.singlecell){
        trim_galore_args += " --clip_r1 6 "

@@ -77,7 +77,7 @@ process TRIM_GALORE {
    }
    if (params.rrbs){
        trim_galore_args += " --rrbs "
    }
    if (params.pbat){
        trim_galore_args += " --clip_r1 $params.pbat "
        if (pairedString == 1){

@@ -85,17 +85,16 @@ process TRIM_GALORE {
    }
    }

    """
    module load trim_galore
    trim_galore $trim_galore_args $reads
    """
}
-
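Because every branch appends to the same string, the final command line is easy to predict. A plain-Groovy illustration with hypothetical values (paired input, params.clip_r1 = 6, params.trim_nextseq = 20); the doubled spaces this produces are harmless on a shell command line:

// Plain-Groovy illustration of the option string assembled above (hypothetical values)
def trim_galore_args = '--fastqc'    // whatever the user passed in
trim_galore_args += ' --gzip '       // always appended: we like small files
trim_galore_args += ' --paired '     // reads arrived as a List
trim_galore_args += ' --clip_r1 6 '  // params.clip_r1 = 6
trim_galore_args += ' --nextseq 20 ' // params.trim_nextseq = 20
println trim_galore_args             // prints the accumulated option string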
trim_galore meta.yml

@@ -28,14 +28,13 @@ output:
    - trimmed_fastq:
        type: file
        description: Trimmed FastQ files
-       pattern: *fq.gz
+       pattern: "*fq.gz"
    -
    - report:
        type: file
        description: Trim Galore! trimming report
-       pattern: *trimming_report.txt
+       pattern: "*trimming_report.txt"

authors:
-   -
-   - @ewels
-   - @FelixKrueger
+   - "@ewels"
+   - "@FelixKrueger"
umi_tools Dockerfile

@@ -1,8 +1,8 @@
FROM nfcore/base:1.7
LABEL authors="chris.cheshire@crick.ac.uk" \
      description="Docker image containing all requirements for the nf-core umi_tools module"

# Install conda packages
COPY environment.yml /
RUN conda env create -f /environment.yml && conda clean -a
ENV PATH /opt/conda/envs/nfcore-module-umitools/bin:$PATH
umi_tools dedup main.nf

@@ -12,7 +12,7 @@ process umitools_dedup {

    input:
    tuple val(sample_id), path(bam)

    output:
    tuple val(sample_id), path("${sample_id}.dedup.bam"), emit: dedupBam
    tuple val(sample_id), path("${sample_id}.dedup.bam.bai"), emit: dedupBai
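A hypothetical call site for this process, reading both named emits (the input channel name is illustrative):

// Hypothetical wiring for umitools_dedup via its named emits
umitools_dedup(aligned_bams)        // aligned_bams: tuples of (sample_id, bam)
umitools_dedup.out.dedupBam.view()  // deduplicated BAMs
umitools_dedup.out.dedupBai.view()  // matching .bai indexes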
umi_tools meta.yml

@@ -1,6 +1,6 @@
name: umi_tools
version: 1.0
description: Tools for dealing with Unique Molecular Identifiers (UMIs)/Random Molecular Tags (RMTs) and single cell RNA-Seq cell barcodes.
keywords:
    - UMI
    - RMT

@@ -8,7 +8,7 @@ keywords:
tools:
    - umi_tools:
        description: |
            Tools for dealing with Unique Molecular Identifiers (UMIs)/Random Molecular Tags (RMTs) and single cell RNA-Seq cell barcodes.
        homepage: https://github.com/CGATOxford/UMI-tools
        documentation: https://umi-tools.readthedocs.io/en/latest/
processes:

@@ -18,7 +18,7 @@ processes:
            The program will execute with the following pattern:
            umi_tools dedup --log={SAMPLE_ID}.dedup.log {params.umitools_dedup_args} -I {SAMPLE_ID}.bam -S {SAMPLE_ID}.dedup.bam --output-stats={SAMPLE_ID}
        description: |
            Groups PCR duplicates and de-duplicates reads to yield one read per group.
            Use this when you want to remove the PCR duplicates prior to any downstream analysis.
        input:
            - sample_id:

@@ -31,16 +31,15 @@ processes:
        - dedupBam:
            type: tuple
            description: A tuple of sample id and output bam file
-           pattern: [sample_id, *SAMPLE_ID.dedup.bam]
+           pattern: [sample_id, "*SAMPLE_ID.dedup.bam"]
        - dedupBai:
            type: tuple
            description: A tuple of sample id and output bai file
-           pattern: [sample_id, *SAMPLE_ID.dedup.bam.bai]
+           pattern: [sample_id, "*SAMPLE_ID.dedup.bam.bai"]
        - report:
            type: file
            description: Log file for the umi_tools operation
-           pattern: *SAMPLE_ID.dedup.log
+           pattern: "*SAMPLE_ID.dedup.log"
authors:
-   - @candiceh08
-   - @chris-cheshire
+   - "@candiceh08"
+   - "@chris-cheshire"