Merge branch 'nf-core:master' into antismashlite

Jasmin F 2022-04-25 14:21:54 +02:00 committed by GitHub
commit 6467e23809
30 changed files with 555 additions and 43 deletions

modules/cat/fastq/main.nf

@@ -4,8 +4,8 @@ process CAT_FASTQ {

     conda (params.enable_conda ? "conda-forge::sed=4.7" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://containers.biocontainers.pro/s3/SingImgsRepo/biocontainers/v1.2.0_cv1/biocontainers_v1.2.0_cv1.img' :
-        'biocontainers/biocontainers:v1.2.0_cv1' }"
+        'https://depot.galaxyproject.org/singularity/ubuntu:20.04' :
+        'ubuntu:20.04' }"

     input:
     tuple val(meta), path(reads, stageAs: "input*/*")

modules/elprep/filter/main.nf Normal file

@@ -0,0 +1,89 @@
process ELPREP_FILTER {
    tag "$meta.id"
    label 'process_high'

    conda (params.enable_conda ? "bioconda::elprep=5.1.2" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/elprep:5.1.2--he881be0_0':
        'quay.io/biocontainers/elprep:5.1.2--he881be0_0' }"

    input:
    tuple val(meta), path(bam)
    val(run_haplotypecaller)
    val(run_bqsr)
    path(reference_sequences)
    path(filter_regions_bed)
    path(reference_elfasta)
    path(known_sites_elsites)
    path(target_regions_bed)
    path(intermediate_bqsr_tables)
    val(bqsr_tables_only)
    val(get_activity_profile)
    val(get_assembly_regions)

    output:
    tuple val(meta), path("output/**.{bam,sam}")    , emit: bam
    tuple val(meta), path("*.metrics.txt")          , optional: true, emit: metrics
    tuple val(meta), path("*.recall")               , optional: true, emit: recall
    tuple val(meta), path("*.vcf.gz")               , optional: true, emit: gvcf
    tuple val(meta), path("*.table")                , optional: true, emit: table
    tuple val(meta), path("*.activity_profile.igv") , optional: true, emit: activity_profile
    tuple val(meta), path("*.assembly_regions.igv") , optional: true, emit: assembly_regions
    path "versions.yml"                             , emit: versions

    when:
    task.ext.when == null || task.ext.when

    script:
    def args = task.ext.args ?: ''
    def prefix = task.ext.prefix ?: "${meta.id}"
    def suffix = args.contains("--output-type sam") ? "sam" : "bam"

    // filter args
    def reference_sequences_cmd = reference_sequences ? " --replace-reference-sequences ${reference_sequences}" : ""
    def filter_regions_cmd = filter_regions_bed ? " --filter-non-overlapping-reads ${filter_regions_bed}" : ""

    // markdup args
    def markdup_cmd = args.contains("--mark-duplicates") ? " --mark-optical-duplicates ${prefix}.metrics.txt" : ""

    // variant calling args
    def haplotyper_cmd = run_haplotypecaller ? " --haplotypecaller ${prefix}.g.vcf.gz" : ""
    def fasta_cmd = reference_elfasta ? " --reference ${reference_elfasta}" : ""
    def known_sites_cmd = known_sites_elsites ? " --known-sites ${known_sites_elsites}" : ""
    def target_regions_cmd = target_regions_bed ? " --target-regions ${target_regions_bed}" : ""

    // bqsr args
    def bqsr_cmd = run_bqsr ? " --bqsr ${prefix}.recall" : ""
    def bqsr_tables_only_cmd = bqsr_tables_only ? " --bqsr-tables-only ${prefix}.table" : ""
    def intermediate_bqsr_cmd = intermediate_bqsr_tables ? " --bqsr-apply ." : ""

    // misc
    def activity_profile_cmd = get_activity_profile ? " --activity-profile ${prefix}.activity_profile.igv" : ""
    def assembly_regions_cmd = get_assembly_regions ? " --assembly-regions ${prefix}.assembly_regions.igv" : ""

    """
    elprep filter ${bam} output/${prefix}.${suffix} \\
        ${reference_sequences_cmd} \\
        ${filter_regions_cmd} \\
        ${markdup_cmd} \\
        ${haplotyper_cmd} \\
        ${fasta_cmd} \\
        ${known_sites_cmd} \\
        ${target_regions_cmd} \\
        ${bqsr_cmd} \\
        ${bqsr_tables_only_cmd} \\
        ${intermediate_bqsr_cmd} \\
        ${activity_profile_cmd} \\
        ${assembly_regions_cmd} \\
        --nr-of-threads ${task.cpus} \\
        $args

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        elprep: \$(elprep 2>&1 | head -n2 | tail -n1 | sed 's/^.*version //;s/ compiled.*\$//')
    END_VERSIONS
    """
}
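Several behaviours above key off task.ext.args rather than module inputs: the output suffix logic checks for "--output-type sam", and duplicate marking is triggered by "--mark-duplicates". A minimal configuration sketch, assuming those are the only flags you want to pass (the two flags are exactly the ones the script block inspects; everything else here is illustrative):

process {
    withName: ELPREP_FILTER {
        // '--output-type sam' flips the output suffix from .bam to .sam;
        // '--mark-duplicates' additionally produces the optical-duplicate metrics file
        ext.args = '--mark-duplicates --output-type sam'
    }
}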

modules/elprep/filter/meta.yml Normal file

@@ -0,0 +1,106 @@
name: "elprep_filter"
description: "Filter, sort and markdup sam/bam files, with optional BQSR and variant calling."
keywords:
  - sort
  - bam
  - sam
  - filter
  - variant calling
tools:
  - "elprep":
      description: "elPrep is a high-performance tool for preparing .sam/.bam files for variant calling in sequencing pipelines. It can be used as a drop-in replacement for SAMtools/Picard/GATK4."
      homepage: "https://github.com/ExaScience/elprep"
      documentation: "https://github.com/ExaScience/elprep"
      tool_dev_url: "https://github.com/ExaScience/elprep"
      doi: "10.1371/journal.pone.0244471"
      licence: "['AGPL v3']"
input:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - bam:
      type: file
      description: Input SAM/BAM file
      pattern: "*.{bam,sam}"
  - run_haplotypecaller:
      type: boolean
      description: Run variant calling on the input files. Needed to generate GVCF output.
  - run_bqsr:
      type: boolean
      description: Run BQSR on the input files. Needed to generate recall metrics.
  - reference_sequences:
      type: file
      description: Optional SAM header to replace the existing header.
      pattern: "*.sam"
  - filter_regions_bed:
      type: file
      description: Optional BED file containing regions to filter.
      pattern: "*.bed"
  - reference_elfasta:
      type: file
      description: Elfasta file, required for BQSR and variant calling.
      pattern: "*.elfasta"
  - known_sites_elsites:
      type: file
      description: Optional elsites file containing known SNPs for BQSR.
      pattern: "*.elsites"
  - target_regions_bed:
      type: file
      description: Optional BED file containing target regions for BQSR and variant calling.
      pattern: "*.bed"
  - intermediate_bqsr_tables:
      type: file
      description: Optional list of BQSR tables, used when parsing files created by `elprep split`.
      pattern: "*.table"
  - bqsr_tables_only:
      type: boolean
      description: Write intermediate BQSR tables, used when parsing files created by `elprep split`.
  - get_activity_profile:
      type: boolean
      description: Write the activity profile calculated by the haplotypecaller to the given file in IGV format.
  - get_assembly_regions:
      type: boolean
      description: Write the assembly regions calculated by the haplotypecaller to the specified file in IGV format.
output:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"
  - bam:
      type: file
      description: Sorted, duplicate-marked and optionally recalibrated BAM/SAM file
      pattern: "*.{bam,sam}"
  - metrics:
      type: file
      description: Optional duplicate metrics file generated by elprep
      pattern: "*.{metrics.txt}"
  - recall:
      type: file
      description: Optional recall metrics file generated by elprep
      pattern: "*.{recall}"
  - gvcf:
      type: file
      description: Optional GVCF output file
      pattern: "*.{vcf.gz}"
  - table:
      type: file
      description: Optional intermediate BQSR table output file
      pattern: "*.{table}"
  - activity_profile:
      type: file
      description: Optional activity profile output file
      pattern: "*.{activity_profile.igv}"
  - assembly_regions:
      type: file
      description: Optional assembly regions output file
      pattern: "*.{assembly_regions.igv}"
authors:
  - "@matthdsm"

modules/elprep/split/main.nf

@@ -11,16 +11,16 @@ process ELPREP_SPLIT {
     tuple val(meta), path(bam)

     output:
-    tuple val(meta), path("**.{bam,sam}"), emit: bam
+    tuple val(meta), path("output/**.{bam,sam}"), emit: bam
     path "versions.yml"                  , emit: versions

     when:
     task.ext.when == null || task.ext.when

     script:
-    def args = task.ext.args ?: ''
-    def prefix = task.ext.prefix ?: "${meta.id}"
-    meta.single_end ? args += " --single-end": ""
+    def args       = task.ext.args ?: ''
+    def prefix     = task.ext.prefix ?: "${meta.id}"
+    def single_end = meta.single_end ? " --single-end": ""
     """
     # create directory and move all input so elprep can find and merge them before splitting
@@ -31,8 +31,9 @@ process ELPREP_SPLIT {
     elprep split \\
         input \\
-        . \\
+        output/ \\
         $args \\
+        $single_end \\
         --nr-of-threads $task.cpus \\
         --output-prefix $prefix

modules/gamma/main.nf Normal file

@@ -0,0 +1,41 @@
def VERSION = '2.1' // Version information not provided by tool on CLI

process GAMMA {
    tag "$meta.id"
    label 'process_low'

    conda (params.enable_conda ? "bioconda::gamma=2.1" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/gamma%3A2.1--hdfd78af_0':
        'quay.io/biocontainers/gamma:2.1--hdfd78af_0' }"

    input:
    tuple val(meta), path(fasta)
    path(db)

    output:
    tuple val(meta), path("*.gamma") , emit: gamma
    tuple val(meta), path("*.psl")   , emit: psl
    tuple val(meta), path("*.gff")   , optional:true , emit: gff
    tuple val(meta), path("*.fasta") , optional:true , emit: fasta
    path "versions.yml"              , emit: versions

    when:
    task.ext.when == null || task.ext.when

    script:
    def args = task.ext.args ?: ''
    def prefix = task.ext.prefix ?: "${meta.id}"
    """
    GAMMA.py \\
        $args \\
        $fasta \\
        $db \\
        $prefix

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        gamma: $VERSION
    END_VERSIONS
    """
}

modules/gamma/meta.yml Normal file

@@ -0,0 +1,63 @@
name: "gamma"
description: Gene Allele Mutation Microbial Assessment
keywords:
  - gamma
  - gene-calling
tools:
  - "gamma":
      description: "Tool for Gene Allele Mutation Microbial Assessment"
      homepage: "https://github.com/rastanton/GAMMA"
      documentation: "https://github.com/rastanton/GAMMA"
      tool_dev_url: "https://github.com/rastanton/GAMMA"
      doi: "10.1093/bioinformatics/btab607"
      licence: "['Apache License 2.0']"
input:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - fasta:
      type: file
      description: FASTA file
      pattern: "*.{fa,fasta}"
  - db:
      type: file
      description: Database in FASTA format
      pattern: "*.{fa,fasta}"
output:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"
  - gamma:
      type: file
      description: GAMMA file with annotated gene matches
      pattern: "*.{gamma}"
  - psl:
      type: file
      description: PSL file with all gene matches found
      pattern: "*.{psl}"
  - gff:
      type: file
      description: GFF file
      pattern: "*.{gff}"
  - fasta:
      type: file
      description: Multifasta file of the gene matches
      pattern: "*.{fasta}"
authors:
  - "@sateeshperi"
  - "@rastanton"

modules/gunzip/main.nf

@@ -4,8 +4,8 @@ process GUNZIP {

     conda (params.enable_conda ? "conda-forge::sed=4.7" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://containers.biocontainers.pro/s3/SingImgsRepo/biocontainers/v1.2.0_cv1/biocontainers_v1.2.0_cv1.img' :
-        'biocontainers/biocontainers:v1.2.0_cv1' }"
+        'https://depot.galaxyproject.org/singularity/ubuntu:20.04' :
+        'ubuntu:20.04' }"

     input:
     tuple val(meta), path(archive)

modules/kaiju/kaiju2table/main.nf Normal file

@@ -0,0 +1,40 @@
process KAIJU_KAIJU2TABLE {
    tag "$meta.id"
    label 'process_low'

    conda (params.enable_conda ? "bioconda::kaiju=1.8.2" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/kaiju:1.8.2--h5b5514e_1':
        'quay.io/biocontainers/kaiju:1.8.2--h2e03b76_0' }"

    input:
    tuple val(meta), path(results)
    path db
    val taxon_rank

    output:
    tuple val(meta), path('*.txt'), emit: summary
    path "versions.yml"           , emit: versions

    when:
    task.ext.when == null || task.ext.when

    script:
    def args = task.ext.args ?: ''
    def prefix = task.ext.prefix ?: "${meta.id}"
    """
    dbnodes=`find -L ${db} -name "*nodes.dmp"`
    dbname=`find -L ${db} -name "*.fmi" -not -name "._*"`
    kaiju2table $args \\
        -t \$dbnodes \\
        -n \$dbname \\
        -r ${taxon_rank} \\
        -o ${prefix}.txt \\
        ${results}

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        kaiju: \$(echo \$( kaiju -h 2>&1 | sed -n 1p | sed 's/^.*Kaiju //' ))
    END_VERSIONS
    """
}

modules/kaiju/kaiju2table/meta.yml Normal file

@@ -0,0 +1,50 @@
name: "kaiju_kaiju2table"
description: Summarise kaiju classification results into a table for a given taxonomic rank
keywords:
  - classify
  - metagenomics
tools:
  - kaiju:
      description: Fast and sensitive taxonomic classification for metagenomics
      homepage: https://kaiju.binf.ku.dk/
      documentation: https://github.com/bioinformatics-centre/kaiju/blob/master/README.md
      tool_dev_url: https://github.com/bioinformatics-centre/kaiju
      doi: "10.1038/ncomms11257"
      licence: ["GNU GPL v3"]
input:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - results:
      type: file
      description: File containing the kaiju classification results
      pattern: "*.{txt}"
  - db:
      type: directory
      description: Directory containing the kaiju database files (*nodes.dmp and *.fmi)
  - taxon_rank:
      type: string
      description: |
        Taxonomic rank to display in report
      pattern: "phylum|class|order|family|genus|species"
output:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"
  - summary:
      type: file
      description: |
        Summary table for a given taxonomic rank
      pattern: "*.{txt}"
authors:
  - "@sofstam"
  - "@talnor"
  - "@jfy133"

modules/phantompeakqualtools/main.nf

@@ -26,7 +26,7 @@ process PHANTOMPEAKQUALTOOLS {
     def prefix = task.ext.prefix ?: "${meta.id}"
     """
     RUN_SPP=`which run_spp.R`
-    Rscript $args -e "library(caTools); source(\\"\$RUN_SPP\\")" -c="$bam" -savp="${prefix}.spp.pdf" -savd="${prefix}.spp.Rdata" -out="${prefix}.spp.out" -p=$task.cpus
+    Rscript $args -e "library(caTools); source(\\"\$RUN_SPP\\")" -c="$bam" -savp="${prefix}.spp.pdf" -savd="${prefix}.spp.Rdata" -out="${prefix}.spp.out"

     cat <<-END_VERSIONS > versions.yml
     "${task.process}":

modules/stringtie/merge/main.nf

@@ -2,10 +2,10 @@ process STRINGTIE_MERGE {
     label 'process_medium'

     // Note: 2.7X indices incompatible with AWS iGenomes.
-    conda (params.enable_conda ? "bioconda::stringtie=2.1.7" : null)
+    conda (params.enable_conda ? "bioconda::stringtie=2.2.1" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/stringtie:2.1.7--h978d192_0' :
-        'quay.io/biocontainers/stringtie:2.1.7--h978d192_0' }"
+        'https://depot.galaxyproject.org/singularity/stringtie:2.2.1--hecb563c_2' :
+        'quay.io/biocontainers/stringtie:2.2.1--hecb563c_2' }"

     input:
     path stringtie_gtf

modules/stringtie/stringtie/main.nf

@@ -1,11 +1,11 @@
-process STRINGTIE {
+process STRINGTIE_STRINGTIE {
     tag "$meta.id"
     label 'process_medium'

-    conda (params.enable_conda ? "bioconda::stringtie=2.1.7" : null)
+    conda (params.enable_conda ? "bioconda::stringtie=2.2.1" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/stringtie:2.1.7--h978d192_0' :
-        'quay.io/biocontainers/stringtie:2.1.7--h978d192_0' }"
+        'https://depot.galaxyproject.org/singularity/stringtie:2.2.1--hecb563c_2' :
+        'quay.io/biocontainers/stringtie:2.2.1--hecb563c_2' }"

     input:
     tuple val(meta), path(bam)

modules/stringtie/stringtie/meta.yml

@@ -1,4 +1,4 @@
-name: stringtie
+name: stringtie_stringtie
 description: Transcript assembly and quantification for RNA-Seq
 keywords:
   - transcript

modules/untar/main.nf

@@ -2,10 +2,10 @@ process UNTAR {
     tag "$archive"
     label 'process_low'

-    conda (params.enable_conda ? "conda-forge::tar=1.34" : null)
+    conda (params.enable_conda ? "conda-forge::sed=4.7" : null)
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://containers.biocontainers.pro/s3/SingImgsRepo/biocontainers/v1.2.0_cv2/biocontainers_v1.2.0_cv2.img' :
-        'biocontainers/biocontainers:v1.2.0_cv2' }"
+        'https://depot.galaxyproject.org/singularity/ubuntu:20.04' :
+        'ubuntu:20.04' }"

     input:
     tuple val(meta), path(archive)

tests/config/pytest_modules.yml

@@ -603,6 +603,10 @@ ectyper:
   - modules/ectyper/**
   - tests/modules/ectyper/**
+elprep/filter:
+  - modules/elprep/filter/**
+  - tests/modules/elprep/filter/**
 elprep/split:
   - modules/elprep/split/**
   - tests/modules/elprep/split/**
@@ -675,6 +679,10 @@ freebayes:
   - modules/freebayes/**
   - tests/modules/freebayes/**
+gamma:
+  - modules/gamma/**
+  - tests/modules/gamma/**
 gatk4/applybqsr:
   - modules/gatk4/applybqsr/**
   - tests/modules/gatk4/applybqsr/**
@@ -1009,6 +1017,10 @@ kaiju/kaiju:
   - modules/kaiju/kaiju/**
   - tests/modules/kaiju/kaiju/**
+kaiju/kaiju2table:
+  - modules/kaiju/kaiju2table/**
+  - tests/modules/kaiju/kaiju2table/**
 kallisto/index:
   - modules/kallisto/index/**
   - tests/modules/kallisto/index/**

tests/config/test_data.config

@@ -112,6 +112,7 @@ params {
         }
         'homo_sapiens' {
             'genome' {
+                genome_elfasta       = "${test_data_dir}/genomics/homo_sapiens/genome/genome.elfasta"
                 genome_fasta         = "${test_data_dir}/genomics/homo_sapiens/genome/genome.fasta"
                 genome_fasta_fai     = "${test_data_dir}/genomics/homo_sapiens/genome/genome.fasta.fai"
                 genome_dict          = "${test_data_dir}/genomics/homo_sapiens/genome/genome.dict"
@@ -123,6 +124,7 @@ params {
                 genome_header        = "${test_data_dir}/genomics/homo_sapiens/genome/genome.header"
                 genome_bed_gz        = "${test_data_dir}/genomics/homo_sapiens/genome/genome.bed.gz"
                 genome_bed_gz_tbi    = "${test_data_dir}/genomics/homo_sapiens/genome/genome.bed.gz.tbi"
+                genome_elsites       = "${test_data_dir}/genomics/homo_sapiens/genome/genome.elsites"
                 transcriptome_fasta  = "${test_data_dir}/genomics/homo_sapiens/genome/transcriptome.fasta"
                 genome2_fasta        = "${test_data_dir}/genomics/homo_sapiens/genome/genome2.fasta"
                 genome_chain_gz      = "${test_data_dir}/genomics/homo_sapiens/genome/genome.chain.gz"
@@ -136,6 +138,7 @@ params {
                 genome_21_multi_interval_bed_gz_tbi = "${test_data_dir}/genomics/homo_sapiens/genome/chr21/sequence/multi_intervals.bed.gz.tbi"
                 genome_21_chromosomes_dir           = "${test_data_dir}/genomics/homo_sapiens/genome/chr21/sequence/chromosomes.tar.gz"
+                dbsnp_146_hg38_elsites              = "${test_data_dir}/genomics/homo_sapiens/genome/vcf/dbsnp_146.hg38.elsites"
                 dbsnp_146_hg38_vcf_gz               = "${test_data_dir}/genomics/homo_sapiens/genome/vcf/dbsnp_146.hg38.vcf.gz"
                 dbsnp_146_hg38_vcf_gz_tbi           = "${test_data_dir}/genomics/homo_sapiens/genome/vcf/dbsnp_146.hg38.vcf.gz.tbi"
                 gnomad_r2_1_1_vcf_gz                = "${test_data_dir}/genomics/homo_sapiens/genome/vcf/gnomAD.r2.1.1.vcf.gz"
@@ -332,6 +335,7 @@ params {
         'bacteroides_fragilis' {
             'genome' {
                 genome_fna_gz                 = "${test_data_dir}/genomics/prokaryotes/bacteroides_fragilis/genome/genome.fna.gz"
+                genome_gbff_gz                = "${test_data_dir}/genomics/prokaryotes/bacteroides_fragilis/genome/genome.gbff.gz"
                 genome_paf                    = "${test_data_dir}/genomics/prokaryotes/bacteroides_fragilis/genome/genome.paf"
                 genome_mapping_potential_arg  = "${test_data_dir}/genomics/prokaryotes/bacteroides_fragilis/genome/genome.mapping.potential.ARG"

tests/modules/elprep/filter/main.nf Normal file

@@ -0,0 +1,18 @@
#!/usr/bin/env nextflow

nextflow.enable.dsl = 2

include { ELPREP_FILTER } from '../../../../modules/elprep/filter/main.nf'

workflow test_elprep_filter {

    input = [
        [ id:'test', single_end:false ], // meta map
        file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_bam'], checkIfExists: true)
    ]
    reference_elfasta   = file(params.test_data['homo_sapiens']['genome']['genome_elfasta'], checkIfExists: true)
    known_sites_elsites = file(params.test_data['homo_sapiens']['genome']['dbsnp_146_hg38_elsites'], checkIfExists: true)
    target_regions_bed  = file(params.test_data['homo_sapiens']['genome']['genome_bed'], checkIfExists: true)

    ELPREP_FILTER ( input, true, true, [], [], reference_elfasta, known_sites_elsites, target_regions_bed, [], [], true, true)
}

tests/modules/elprep/filter/nextflow.config Normal file

@@ -0,0 +1,7 @@
process {

    publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }

    withName: ELPREP_FILTER {
        ext.args = "--mark-duplicates "
    }
}
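The publishDir closure above derives the publish directory from the process name: it takes the last ':'-separated component of the fully qualified name, keeps the part before the first underscore, and lowercases it. A Groovy sketch of the evaluation, with a hypothetical qualified name:

// 'ELPREP_FILTER' -> 'ELPREP' -> 'elprep', which is why the test below
// expects its files under output/elprep/
def name = 'TEST_ELPREP_FILTER:ELPREP_FILTER'
assert name.tokenize(':')[-1].tokenize('_')[0].toLowerCase() == 'elprep'

The same closure is reused in the gamma and kaiju2table test configs later in this diff.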

tests/modules/elprep/filter/test.yml Normal file

@@ -0,0 +1,13 @@
- name: elprep filter test_elprep_filter
  command: nextflow run tests/modules/elprep/filter -entry test_elprep_filter -c tests/config/nextflow.config
  tags:
    - elprep
    - elprep/filter
  files:
    - path: output/elprep/test.activity_profile.igv
    - path: output/elprep/test.assembly_regions.igv
    - path: output/elprep/output/test.bam
    - path: output/elprep/test.g.vcf.gz
    - path: output/elprep/test.metrics.txt
    - path: output/elprep/test.recall
    - path: output/elprep/versions.yml

tests/modules/elprep/split/test.yml

@@ -4,7 +4,7 @@
     - elprep
     - elprep/split
   files:
-    - path: output/elprep/splits/test-group00001.bam
-    - path: output/elprep/splits/test-unmapped.bam
-    - path: output/elprep/test-spread.bam
+    - path: output/elprep/output/splits/test-group00001.bam
+    - path: output/elprep/output/splits/test-unmapped.bam
+    - path: output/elprep/output/test-spread.bam
     - path: output/elprep/versions.yml

tests/modules/gamma/main.nf Normal file

@@ -0,0 +1,17 @@
#!/usr/bin/env nextflow

nextflow.enable.dsl = 2

include { GAMMA } from '../../../modules/gamma/main.nf'

workflow test_gamma {

    input = [
        [ id:'test', single_end:false ], // meta map
        file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
    ]

    db = [ file(params.test_data['sarscov2']['genome']['transcriptome_fasta'], checkIfExists: true) ]

    GAMMA ( input, db )
}

tests/modules/gamma/nextflow.config Normal file

@@ -0,0 +1,7 @@
process {

    publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }

    ext.args = '--fasta'
}

tests/modules/gamma/test.yml Normal file

@@ -0,0 +1,13 @@
- name: gamma test_gamma
  command: nextflow run tests/modules/gamma -entry test_gamma -c tests/config/nextflow.config
  tags:
    - gamma
  files:
    - path: output/gamma/test.fasta
      md5sum: df37b48466181311e0a679f3c5878484
    - path: output/gamma/test.gamma
      md5sum: 3256708fa517a65ed01d99e0e3c762ae
    - path: output/gamma/test.psl
      md5sum: 162a2757ed3b167ae1e0cdb24213f940
    - path: output/gamma/versions.yml
      md5sum: 3fefb5b46c94993362243c5f9a472057

tests/modules/kaiju/kaiju2table/main.nf Normal file

@@ -0,0 +1,21 @@
#!/usr/bin/env nextflow

nextflow.enable.dsl = 2

include { UNTAR             } from '../../../../modules/untar/main.nf'
include { KAIJU_KAIJU       } from '../../../../modules/kaiju/kaiju/main.nf'
include { KAIJU_KAIJU2TABLE } from '../../../../modules/kaiju/kaiju2table/main.nf'

workflow test_kaiju_kaiju_single_end {

    input = [
        [ id:'test', single_end:true ], // meta map
        file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true)
    ]
    db = [ [], file(params.test_data['sarscov2']['genome']['kaiju_tar_gz'], checkIfExists: true) ]
    taxon_rank = "species"

    ch_db = UNTAR ( db )
    KAIJU_KAIJU ( input, ch_db.untar.map{ it[1] } )
    KAIJU_KAIJU2TABLE ( KAIJU_KAIJU.out.results, ch_db.untar.map{ it[1] }, taxon_rank )
}

tests/modules/kaiju/kaiju2table/nextflow.config Normal file

@@ -0,0 +1,5 @@
process {

    publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }

}

tests/modules/kaiju/kaiju2table/test.yml Normal file

@@ -0,0 +1,9 @@
- name: kaiju kaiju2table test_kaiju_kaiju_single_end
  command: nextflow run tests/modules/kaiju/kaiju2table -entry test_kaiju_kaiju_single_end -c tests/config/nextflow.config
  tags:
    - kaiju
    - kaiju/kaiju2table
  files:
    - path: output/kaiju/test.txt
      md5sum: 0d9f8fd36fcf2888296ae12632c5f0a8
    - path: output/kaiju/versions.yml

tests/modules/stringtie/merge/main.nf

@@ -2,8 +2,8 @@
 nextflow.enable.dsl = 2

-include { STRINGTIE       } from '../../../../modules/stringtie/stringtie/main.nf'
-include { STRINGTIE_MERGE } from '../../../../modules/stringtie/merge/main.nf'
+include { STRINGTIE_STRINGTIE } from '../../../../modules/stringtie/stringtie/main.nf'
+include { STRINGTIE_MERGE     } from '../../../../modules/stringtie/merge/main.nf'

 /*
  * Test with forward strandedness
@@ -15,8 +15,8 @@ workflow test_stringtie_forward_merge {
     ]
     annotation_gtf = file(params.test_data['homo_sapiens']['genome']['genome_gtf'], checkIfExists: true)

-    STRINGTIE ( input, annotation_gtf )
-    STRINGTIE
+    STRINGTIE_STRINGTIE ( input, annotation_gtf )
+    STRINGTIE_STRINGTIE
         .out
         .transcript_gtf
         .map { it -> it[1] }
@@ -35,8 +35,8 @@ workflow test_stringtie_reverse_merge {
     ]
     annotation_gtf = file(params.test_data['homo_sapiens']['genome']['genome_gtf'], checkIfExists: true)

-    STRINGTIE ( input, annotation_gtf )
-    STRINGTIE
+    STRINGTIE_STRINGTIE ( input, annotation_gtf )
+    STRINGTIE_STRINGTIE
         .out
         .transcript_gtf
         .map { it -> it[1] }

tests/modules/stringtie/merge/test.yml

@@ -5,7 +5,7 @@
     - stringtie/merge
   files:
     - path: output/stringtie/stringtie.merged.gtf
-      md5sum: 9fab7049ef2eafdea246fc787d1def40
+      md5sum: d959eb2fab0db48ded7275e0a2e83c05
     - path: output/stringtie/test.ballgown/e2t.ctab
       md5sum: 9ae42e056c955a88a883e5e917840d77
     - path: output/stringtie/test.ballgown/e_data.ctab
@@ -17,11 +17,10 @@
     - path: output/stringtie/test.ballgown/t_data.ctab
       md5sum: 92a98902784e7406ffe054d2adbabc7c
     - path: output/stringtie/test.coverage.gtf
-      md5sum: d41d8cd98f00b204e9800998ecf8427e
     - path: output/stringtie/test.gene.abundance.txt
-      md5sum: 9708811bcefe0f6384293d6f419f3250
+      md5sum: 8bcd8e2730ed3337e2730186dbc184f3
     - path: output/stringtie/test.transcripts.gtf
-      md5sum: 0e42709bfe30c2c7f2574ba664f5fa9f
+      md5sum: a914bd55b68a4b5f607738b17861e362

 - name: stringtie merge test_stringtie_reverse_merge
   command: nextflow run ./tests/modules/stringtie/merge -entry test_stringtie_reverse_merge -c ./tests/config/nextflow.config -c ./tests/modules/stringtie/merge/nextflow.config
@@ -30,7 +29,7 @@
     - stringtie/merge
   files:
     - path: output/stringtie/stringtie.merged.gtf
-      md5sum: afc461bb3cbc368f268a7a45c1b54497
+      md5sum: 6da479298d73d5b3216d4e1576a2bdf4
     - path: output/stringtie/test.ballgown/e2t.ctab
       md5sum: 9ae42e056c955a88a883e5e917840d77
     - path: output/stringtie/test.ballgown/e_data.ctab
@@ -42,8 +41,7 @@
     - path: output/stringtie/test.ballgown/t_data.ctab
       md5sum: 92a98902784e7406ffe054d2adbabc7c
     - path: output/stringtie/test.coverage.gtf
-      md5sum: d41d8cd98f00b204e9800998ecf8427e
     - path: output/stringtie/test.gene.abundance.txt
-      md5sum: 94b85145d60ab1b80a7f0f6cf08418b0
+      md5sum: f289f41b3ba1b9f0aa05d14408f1a5da
     - path: output/stringtie/test.transcripts.gtf
-      md5sum: 3196e3d50fd461aae6408e0a70acae68
+      md5sum: 9dcdc9577c0fdbb25089eda210267546

tests/modules/stringtie/stringtie/main.nf

@@ -2,7 +2,7 @@
 nextflow.enable.dsl = 2

-include { STRINGTIE } from '../../../../modules/stringtie/stringtie/main.nf'
+include { STRINGTIE_STRINGTIE } from '../../../../modules/stringtie/stringtie/main.nf'

 //
 // Test with forward strandedness
 //
@@ -13,7 +13,7 @@ workflow test_stringtie_forward {
     ]
     annotation_gtf = file(params.test_data['sarscov2']['genome']['genome_gtf'], checkIfExists: true)

-    STRINGTIE ( input, annotation_gtf )
+    STRINGTIE_STRINGTIE ( input, annotation_gtf )
 }

 //
@@ -26,5 +26,5 @@ workflow test_stringtie_reverse {
     ]
     annotation_gtf = file(params.test_data['sarscov2']['genome']['genome_gtf'], checkIfExists: true)

-    STRINGTIE ( input, annotation_gtf )
+    STRINGTIE_STRINGTIE ( input, annotation_gtf )
 }

tests/modules/stringtie/stringtie/test.yml

@@ -8,7 +8,6 @@
     - path: ./output/stringtie/test.gene.abundance.txt
       md5sum: 7d8bce7f2a922e367cedccae7267c22e
     - path: ./output/stringtie/test.coverage.gtf
-      md5sum: d41d8cd98f00b204e9800998ecf8427e
     - path: ./output/stringtie/test.ballgown/e_data.ctab
       md5sum: 6b4cf69bc03f3f69890f972a0e8b7471
     - path: ./output/stringtie/test.ballgown/i_data.ctab
@@ -30,7 +29,6 @@
     - path: ./output/stringtie/test.gene.abundance.txt
       md5sum: 7385b870b955dae2c2ab78a70cf05cce
     - path: ./output/stringtie/test.coverage.gtf
-      md5sum: d41d8cd98f00b204e9800998ecf8427e
     - path: ./output/stringtie/test.ballgown/e_data.ctab
       md5sum: 879b6696029d19c4737b562e9d149218
     - path: ./output/stringtie/test.ballgown/i_data.ctab