Merge branch 'master' into rp3-add-shigatyper

Sateesh Peri 2022-04-22 09:01:36 -04:00, committed by GitHub
commit 8f2561c07f
27 changed files with 541 additions and 13 deletions

@@ -0,0 +1,89 @@
process ELPREP_FILTER {
    tag "$meta.id"
    label 'process_high'

    conda (params.enable_conda ? "bioconda::elprep=5.1.2" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/elprep:5.1.2--he881be0_0':
        'quay.io/biocontainers/elprep:5.1.2--he881be0_0' }"

    input:
    tuple val(meta), path(bam)
    val(run_haplotypecaller)
    val(run_bqsr)
    path(reference_sequences)
    path(filter_regions_bed)
    path(reference_elfasta)
    path(known_sites_elsites)
    path(target_regions_bed)
    path(intermediate_bqsr_tables)
    val(bqsr_tables_only)
    val(get_activity_profile)
    val(get_assembly_regions)

    output:
    tuple val(meta), path("output/**.{bam,sam}")    , emit: bam
    tuple val(meta), path("*.metrics.txt")          , optional: true, emit: metrics
    tuple val(meta), path("*.recall")               , optional: true, emit: recall
    tuple val(meta), path("*.vcf.gz")               , optional: true, emit: gvcf
    tuple val(meta), path("*.table")                , optional: true, emit: table
    tuple val(meta), path("*.activity_profile.igv") , optional: true, emit: activity_profile
    tuple val(meta), path("*.assembly_regions.igv") , optional: true, emit: assembly_regions
    path "versions.yml"                             , emit: versions

    when:
    task.ext.when == null || task.ext.when

    script:
    def args = task.ext.args ?: ''
    def prefix = task.ext.prefix ?: "${meta.id}"
    def suffix = args.contains("--output-type sam") ? "sam" : "bam"

    // filter args
    def reference_sequences_cmd = reference_sequences ? " --replace-reference-sequences ${reference_sequences}" : ""
    def filter_regions_cmd = filter_regions_bed ? " --filter-non-overlapping-reads ${filter_regions_bed}" : ""

    // markdup args
    def markdup_cmd = args.contains("--mark-duplicates") ? " --mark-optical-duplicates ${prefix}.metrics.txt" : ""

    // variant calling args
    def haplotyper_cmd = run_haplotypecaller ? " --haplotypecaller ${prefix}.g.vcf.gz" : ""
    def fasta_cmd = reference_elfasta ? " --reference ${reference_elfasta}" : ""
    def known_sites_cmd = known_sites_elsites ? " --known-sites ${known_sites_elsites}" : ""
    def target_regions_cmd = target_regions_bed ? " --target-regions ${target_regions_bed}" : ""

    // bqsr args
    def bqsr_cmd = run_bqsr ? " --bqsr ${prefix}.recall" : ""
    def bqsr_tables_only_cmd = bqsr_tables_only ? " --bqsr-tables-only ${prefix}.table" : ""
    def intermediate_bqsr_cmd = intermediate_bqsr_tables ? " --bqsr-apply ." : ""

    // misc
    def activity_profile_cmd = get_activity_profile ? " --activity-profile ${prefix}.activity_profile.igv" : ""
    def assembly_regions_cmd = get_assembly_regions ? " --assembly-regions ${prefix}.assembly_regions.igv" : ""

    """
    elprep filter ${bam} output/${prefix}.${suffix} \\
        ${reference_sequences_cmd} \\
        ${filter_regions_cmd} \\
        ${markdup_cmd} \\
        ${haplotyper_cmd} \\
        ${fasta_cmd} \\
        ${known_sites_cmd} \\
        ${target_regions_cmd} \\
        ${bqsr_cmd} \\
        ${bqsr_tables_only_cmd} \\
        ${intermediate_bqsr_cmd} \\
        ${activity_profile_cmd} \\
        ${assembly_regions_cmd} \\
        --nr-of-threads ${task.cpus} \\
        $args

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        elprep: \$(elprep 2>&1 | head -n2 | tail -n1 |sed 's/^.*version //;s/ compiled.*\$//')
    END_VERSIONS
    """
}
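
For orientation, the twelve inputs above are positional: a downstream pipeline passes one value or path per declared channel and an empty list for each unused optional file. A minimal sketch, assuming an nf-core-style include path and purely illustrative sample names (the test workflow further down does the same thing against real test data):

#!/usr/bin/env nextflow
nextflow.enable.dsl = 2

include { ELPREP_FILTER } from './modules/elprep/filter/main.nf'

workflow {
    // [ meta map, BAM ] pair for the first input channel
    ch_bam = Channel.of( [ [ id:'sample1', single_end:false ], file('sample1.sorted.bam') ] )

    ELPREP_FILTER (
        ch_bam,
        false,  // run_haplotypecaller
        false,  // run_bqsr
        [],     // reference_sequences
        [],     // filter_regions_bed
        [],     // reference_elfasta
        [],     // known_sites_elsites
        [],     // target_regions_bed
        [],     // intermediate_bqsr_tables
        false,  // bqsr_tables_only
        false,  // get_activity_profile
        false   // get_assembly_regions
    )

    // filtered, duplicate-marked alignments come out on the named `bam` channel
    ELPREP_FILTER.out.bam.view()
}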

@@ -0,0 +1,106 @@
name: "elprep_filter"
description: "Filter, sort and markdup sam/bam files, with optional BQSR and variant calling."
keywords:
  - sort
  - bam
  - sam
  - filter
  - variant calling
tools:
  - "elprep":
      description: "elPrep is a high-performance tool for preparing .sam/.bam files for variant calling in sequencing pipelines. It can be used as a drop-in replacement for SAMtools/Picard/GATK4."
      homepage: "https://github.com/ExaScience/elprep"
      documentation: "https://github.com/ExaScience/elprep"
      tool_dev_url: "https://github.com/ExaScience/elprep"
      doi: "10.1371/journal.pone.0244471"
      licence: "['AGPL v3']"

input:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - bam:
      type: file
      description: Input SAM/BAM file
      pattern: "*.{bam,sam}"
  - run_haplotypecaller:
      type: boolean
      description: Run variant calling on the input files. Needed to generate GVCF output.
  - run_bqsr:
      type: boolean
      description: Run BQSR on the input files. Needed to generate recall metrics.
  - reference_sequences:
      type: file
      description: Optional SAM header to replace the existing header.
      pattern: "*.sam"
  - filter_regions_bed:
      type: file
      description: Optional BED file containing regions to filter.
      pattern: "*.bed"
  - reference_elfasta:
      type: file
      description: Elfasta file, required for BQSR and variant calling.
      pattern: "*.elfasta"
  - known_sites_elsites:
      type: file
      description: Optional elsites file containing known SNPs for BQSR.
      pattern: "*.elsites"
  - target_regions_bed:
      type: file
      description: Optional BED file containing target regions for BQSR and variant calling.
      pattern: "*.bed"
  - intermediate_bqsr_tables:
      type: file
      description: Optional list of BQSR tables, used when parsing files created by `elprep split`.
      pattern: "*.table"
  - bqsr_tables_only:
      type: boolean
      description: Write intermediate BQSR tables, used when parsing files created by `elprep split`.
  - get_activity_profile:
      type: boolean
      description: Write the activity profile calculated by the haplotypecaller to the given file in IGV format.
  - get_assembly_regions:
      type: boolean
      description: Write the assembly regions calculated by the haplotypecaller to the specified file in IGV format.

output:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"
  - bam:
      type: file
      description: Sorted, duplicate-marked and optionally BQSR-recalibrated BAM/SAM file
      pattern: "*.{bam,sam}"
  - metrics:
      type: file
      description: Optional duplicate metrics file generated by elprep
      pattern: "*.{metrics.txt}"
  - recall:
      type: file
      description: Optional recall metrics file generated by elprep
      pattern: "*.{recall}"
  - gvcf:
      type: file
      description: Optional GVCF output file
      pattern: "*.{vcf.gz}"
  - table:
      type: file
      description: Optional intermediate BQSR table output file
      pattern: "*.{table}"
  - activity_profile:
      type: file
      description: Optional activity profile output file
      pattern: "*.{activity_profile.igv}"
  - assembly_regions:
      type: file
      description: Optional assembly regions output file
      pattern: "*.{assembly_regions.igv}"

authors:
  - "@matthdsm"

@@ -0,0 +1,45 @@
process ELPREP_SPLIT {
    tag "$meta.id"
    label 'process_low'

    conda (params.enable_conda ? "bioconda::elprep=5.1.2" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/elprep:5.1.2--he881be0_0':
        'quay.io/biocontainers/elprep:5.1.2--he881be0_0' }"

    input:
    tuple val(meta), path(bam)

    output:
    tuple val(meta), path("output/**.{bam,sam}"), emit: bam
    path "versions.yml"                         , emit: versions

    when:
    task.ext.when == null || task.ext.when

    script:
    def args = task.ext.args ?: ''
    def prefix = task.ext.prefix ?: "${meta.id}"
    def single_end = meta.single_end ? " --single-end" : ""

    """
    # create directory and move all input so elprep can find and merge them before splitting
    mkdir input
    mv ${bam} input/

    mkdir ${prefix}

    elprep split \\
        input \\
        output/ \\
        $args \\
        $single_end \\
        --nr-of-threads $task.cpus \\
        --output-prefix $prefix

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        elprep: \$(elprep 2>&1 | head -n2 | tail -n1 |sed 's/^.*version //;s/ compiled.*\$//')
    END_VERSIONS
    """
}
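
The `bam` output above emits one tuple per sample holding every chunk produced by the split, so a consumer usually flattens it back into per-chunk pairs before further processing. A minimal sketch, with the include path and file name purely illustrative:

#!/usr/bin/env nextflow
nextflow.enable.dsl = 2

include { ELPREP_SPLIT } from './modules/elprep/split/main.nf'

workflow {
    ch_bam = Channel.of( [ [ id:'sample1', single_end:false ], file('sample1.sorted.bam') ] )

    ELPREP_SPLIT ( ch_bam )

    // turn [ meta, [ chunk1.bam, chunk2.bam, ... ] ] into one [ meta, chunk.bam ] tuple per file
    ELPREP_SPLIT.out.bam
        .transpose()
        .view()
}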

@@ -0,0 +1,43 @@
name: "elprep_split"
description: Split bam file into manageable chunks
keywords:
  - bam
  - split by chromosome
tools:
  - "elprep":
      description: "elPrep is a high-performance tool for preparing .sam/.bam files for variant calling in sequencing pipelines. It can be used as a drop-in replacement for SAMtools/Picard/GATK4."
      homepage: "https://github.com/ExaScience/elprep"
      documentation: "https://github.com/ExaScience/elprep"
      tool_dev_url: "https://github.com/ExaScience/elprep"
      doi: "10.1371/journal.pone.0244471"
      licence: "['AGPL v3']"

input:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - bam:
      type: file
      description: List of BAM/SAM files
      pattern: "*.{bam,sam}"

output:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"
  - bam:
      type: file
      description: List of split BAM/SAM files
      pattern: "*.{bam,sam}"

authors:
  - "@matthdsm"

@@ -0,0 +1,40 @@
process KAIJU_KAIJU2TABLE {
    tag "$meta.id"
    label 'process_low'

    conda (params.enable_conda ? "bioconda::kaiju=1.8.2" : null)
    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/kaiju:1.8.2--h5b5514e_1':
        'quay.io/biocontainers/kaiju:1.8.2--h2e03b76_0' }"

    input:
    tuple val(meta), path(results)
    path db
    val taxon_rank

    output:
    tuple val(meta), path('*.txt'), emit: summary
    path "versions.yml"           , emit: versions

    when:
    task.ext.when == null || task.ext.when

    script:
    def args = task.ext.args ?: ''
    def prefix = task.ext.prefix ?: "${meta.id}"
    """
    # locate the taxonomy nodes file and the kaiju index inside the supplied database directory
    dbnodes=`find -L ${db} -name "*nodes.dmp"`
    dbname=`find -L ${db} -name "*.fmi" -not -name "._*"`

    kaiju2table $args \\
        -t \$dbnodes \\
        -n \$dbname \\
        -r ${taxon_rank} \\
        -o ${prefix}.txt \\
        ${results}

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
        kaiju: \$(echo \$( kaiju -h 2>&1 | sed -n 1p | sed 's/^.*Kaiju //' ))
    END_VERSIONS
    """
}

@@ -0,0 +1,50 @@
name: "kaiju_kaiju2table"
description: Summarise kaiju read classifications into a per-taxon table at a given taxonomic rank
keywords:
  - classify
  - metagenomics
tools:
  - kaiju:
      description: Fast and sensitive taxonomic classification for metagenomics
      homepage: https://kaiju.binf.ku.dk/
      documentation: https://github.com/bioinformatics-centre/kaiju/blob/master/README.md
      tool_dev_url: https://github.com/bioinformatics-centre/kaiju
      doi: "10.1038/ncomms11257"
      licence: ["GNU GPL v3"]

input:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - results:
      type: file
      description: File containing the kaiju classification results
      pattern: "*.{txt}"
  - taxon_rank:
      type: string
      description: |
        Taxonomic rank to display in report
      pattern: "phylum|class|order|family|genus|species"

output:
  - meta:
      type: map
      description: |
        Groovy Map containing sample information
        e.g. [ id:'test', single_end:false ]
  - versions:
      type: file
      description: File containing software versions
      pattern: "versions.yml"
  - results:
      type: file
      description: |
        Summary table for a given taxonomic rank
      pattern: "*.{tsv}"

authors:
  - "@sofstam"
  - "@talnor"
  - "@jfy133"

@@ -15,8 +15,8 @@ process PICARD_COLLECTHSMETRICS {
    path target_intervals

    output:
-   tuple val(meta), path("*collecthsmetrics.txt"), emit: hs_metrics
+   tuple val(meta), path("*_metrics")            , emit: metrics
    path "versions.yml"                           , emit: versions

    when:
    task.ext.when == null || task.ext.when
@@ -41,7 +41,8 @@ process PICARD_COLLECTHSMETRICS {
        -BAIT_INTERVALS $bait_intervals \\
        -TARGET_INTERVALS $target_intervals \\
        -INPUT $bam \\
-       -OUTPUT ${prefix}_collecthsmetrics.txt
+       -OUTPUT ${prefix}.CollectHsMetrics.coverage_metrics

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":
@@ -52,7 +53,7 @@ process PICARD_COLLECTHSMETRICS {
    stub:
    def prefix = task.ext.prefix ?: "${meta.id}"
    """
-   touch ${prefix}_collecthsmetrics.txt
+   touch ${prefix}.CollectHsMetrics.coverage_metrics

    cat <<-END_VERSIONS > versions.yml
    "${task.process}":

@@ -57,10 +57,11 @@ output:
      type: file
      description: File containing software versions
      pattern: "versions.yml"
-  - hs_metrics:
+  - metrics:
      type: file
-      description: The metrics file.
-      pattern: "*_collecthsmetrics.txt"
+      description: Alignment metrics files generated by picard
+      pattern: "*_{metrics}"
authors:
  - "@projectoriented"
+  - "@matthdsm"

@@ -8,7 +8,7 @@ process SAMTOOLS_VIEW {
        'quay.io/biocontainers/samtools:1.15.1--h1170115_0' }"

    input:
-   tuple val(meta), path(input)
+   tuple val(meta), path(input), path(index)
    path fasta

    output:

@@ -25,6 +25,10 @@ input:
      type: file
      description: BAM/CRAM/SAM file
      pattern: "*.{bam,cram,sam}"
+  - index:
+      type: optional file
+      description: BAM.BAI/CRAM.CRAI file
+      pattern: "*.{.bai,.crai}"
  - fasta:
      type: optional file
      description: Reference file the CRAM was created with
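
With this change the first input channel of SAMTOOLS_VIEW carries an optional index as its third element, and callers without an index pass an empty list in that slot. A minimal sketch, with the include path and file names purely illustrative (the updated test workflow below does the same against the shared test data):

include { SAMTOOLS_VIEW } from './modules/samtools/view/main.nf'

workflow {
    // [ meta, alignment, index ]; use [] as the third element when no index is available
    ch_input = Channel.of( [ [ id:'test', single_end:false ], file('test.bam'), file('test.bam.bai') ] )
    SAMTOOLS_VIEW ( ch_input, [] )
}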

@@ -33,4 +33,15 @@ process STRANGER {
        stranger: \$( stranger --version )
    END_VERSIONS
    """
+
+   stub:
+   def prefix = task.ext.prefix ?: "${meta.id}"
+   """
+   touch ${prefix}.vcf.gz
+
+   cat <<-END_VERSIONS > versions.yml
+   "${task.process}":
+       stranger: \$( stranger --version )
+   END_VERSIONS
+   """
}

@@ -599,6 +599,14 @@ ectyper:
  - modules/ectyper/**
  - tests/modules/ectyper/**

+elprep/filter:
+  - modules/elprep/filter/**
+  - tests/modules/elprep/filter/**
+
+elprep/split:
+  - modules/elprep/split/**
+  - tests/modules/elprep/split/**
+
emmtyper:
  - modules/emmtyper/**
  - tests/modules/emmtyper/**
@@ -1001,6 +1009,10 @@ kaiju/kaiju:
  - modules/kaiju/kaiju/**
  - tests/modules/kaiju/kaiju/**

+kaiju/kaiju2table:
+  - modules/kaiju/kaiju2table/**
+  - tests/modules/kaiju/kaiju2table/**
+
kallisto/index:
  - modules/kallisto/index/**
  - tests/modules/kallisto/index/**

@@ -112,6 +112,7 @@ params {
        }
        'homo_sapiens' {
            'genome' {
+               genome_elfasta                      = "${test_data_dir}/genomics/homo_sapiens/genome/genome.elfasta"
                genome_fasta                        = "${test_data_dir}/genomics/homo_sapiens/genome/genome.fasta"
                genome_fasta_fai                    = "${test_data_dir}/genomics/homo_sapiens/genome/genome.fasta.fai"
                genome_dict                         = "${test_data_dir}/genomics/homo_sapiens/genome/genome.dict"
@@ -123,6 +124,7 @@ params {
                genome_header                       = "${test_data_dir}/genomics/homo_sapiens/genome/genome.header"
                genome_bed_gz                       = "${test_data_dir}/genomics/homo_sapiens/genome/genome.bed.gz"
                genome_bed_gz_tbi                   = "${test_data_dir}/genomics/homo_sapiens/genome/genome.bed.gz.tbi"
+               genome_elsites                      = "${test_data_dir}/genomics/homo_sapiens/genome/genome.elsites"
                transcriptome_fasta                 = "${test_data_dir}/genomics/homo_sapiens/genome/transcriptome.fasta"
                genome2_fasta                       = "${test_data_dir}/genomics/homo_sapiens/genome/genome2.fasta"
                genome_chain_gz                     = "${test_data_dir}/genomics/homo_sapiens/genome/genome.chain.gz"
@@ -136,6 +138,7 @@ params {
                genome_21_multi_interval_bed_gz_tbi = "${test_data_dir}/genomics/homo_sapiens/genome/chr21/sequence/multi_intervals.bed.gz.tbi"
                genome_21_chromosomes_dir           = "${test_data_dir}/genomics/homo_sapiens/genome/chr21/sequence/chromosomes.tar.gz"
+               dbsnp_146_hg38_elsites              = "${test_data_dir}/genomics/homo_sapiens/genome/vcf/dbsnp_146.hg38.elsites"
                dbsnp_146_hg38_vcf_gz               = "${test_data_dir}/genomics/homo_sapiens/genome/vcf/dbsnp_146.hg38.vcf.gz"
                dbsnp_146_hg38_vcf_gz_tbi           = "${test_data_dir}/genomics/homo_sapiens/genome/vcf/dbsnp_146.hg38.vcf.gz.tbi"
                gnomad_r2_1_1_vcf_gz                = "${test_data_dir}/genomics/homo_sapiens/genome/vcf/gnomAD.r2.1.1.vcf.gz"

@@ -0,0 +1,18 @@
#!/usr/bin/env nextflow

nextflow.enable.dsl = 2

include { ELPREP_FILTER } from '../../../../modules/elprep/filter/main.nf'

workflow test_elprep_filter {

    input = [
        [ id:'test', single_end:false ], // meta map
        file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_bam'], checkIfExists: true)
    ]
    reference_elfasta = file(params.test_data['homo_sapiens']['genome']['genome_elfasta'], checkIfExists: true)
    known_sites_elsites = file(params.test_data['homo_sapiens']['genome']['dbsnp_146_hg38_elsites'], checkIfExists: true)
    target_regions_bed = file(params.test_data['homo_sapiens']['genome']['genome_bed'], checkIfExists: true)

    ELPREP_FILTER ( input, true, true, [], [], reference_elfasta, known_sites_elsites, target_regions_bed, [], [], true, true)
}

@@ -0,0 +1,7 @@
process {
    publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }

    withName: ELPREP_FILTER {
        ext.args = "--mark-duplicates "
    }
}
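
The module reads its optional behaviour from `ext.args`: duplicate marking is switched on by `--mark-duplicates` and SAM output by `--output-type sam`. A hypothetical alternative configuration, not part of this commit, would only change that string:

process {
    withName: ELPREP_FILTER {
        // hypothetical: emit SAM instead of BAM and skip duplicate marking
        ext.args = "--output-type sam"
    }
}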

@@ -0,0 +1,13 @@
- name: elprep filter test_elprep_filter
  command: nextflow run tests/modules/elprep/filter -entry test_elprep_filter -c tests/config/nextflow.config
  tags:
    - elprep
    - elprep/filter
  files:
    - path: output/elprep/test.activity_profile.igv
    - path: output/elprep/test.assembly_regions.igv
    - path: output/elprep/output/test.bam
    - path: output/elprep/test.g.vcf.gz
    - path: output/elprep/test.metrics.txt
    - path: output/elprep/test.recall
    - path: output/elprep/versions.yml

@@ -0,0 +1,15 @@
#!/usr/bin/env nextflow

nextflow.enable.dsl = 2

include { ELPREP_SPLIT } from '../../../../modules/elprep/split/main.nf'

workflow test_elprep_split {

    input = [
        [ id:'test', single_end:false ], // meta map
        file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_bam'], checkIfExists: true)
    ]

    ELPREP_SPLIT ( input )
}

@@ -0,0 +1,9 @@
process {

    publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }

    withName : ELPREP_SPLIT {
        ext.args = "--contig-group-size 1 --output-type bam"
    }

}

@@ -0,0 +1,10 @@
- name: elprep split test_elprep_split
  command: nextflow run tests/modules/elprep/split -entry test_elprep_split -c tests/config/nextflow.config
  tags:
    - elprep
    - elprep/split
  files:
    - path: output/elprep/output/splits/test-group00001.bam
    - path: output/elprep/output/splits/test-unmapped.bam
    - path: output/elprep/output/test-spread.bam
    - path: output/elprep/versions.yml

@@ -0,0 +1,21 @@
#!/usr/bin/env nextflow

nextflow.enable.dsl = 2

include { UNTAR } from '../../../../modules/untar/main.nf'
include { KAIJU_KAIJU } from '../../../../modules/kaiju/kaiju/main.nf'
include { KAIJU_KAIJU2TABLE } from '../../../../modules/kaiju/kaiju2table/main.nf'

workflow test_kaiju_kaiju_single_end {

    input = [
        [ id:'test', single_end:true ], // meta map
        file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true)
    ]
    db = [ [], file(params.test_data['sarscov2']['genome']['kaiju_tar_gz'], checkIfExists: true) ]
    taxon_rank = "species"

    ch_db = UNTAR ( db )
    KAIJU_KAIJU ( input, ch_db.untar.map{ it[1] } )
    KAIJU_KAIJU2TABLE ( KAIJU_KAIJU.out.results, ch_db.untar.map{ it[1] }, taxon_rank )
}

@@ -0,0 +1,5 @@
process {

    publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }

}

@@ -0,0 +1,9 @@
- name: kaiju kaiju2table test_kaiju_kaiju_single_end
  command: nextflow run tests/modules/kaiju/kaiju2table -entry test_kaiju_kaiju_single_end -c tests/config/nextflow.config
  tags:
    - kaiju
    - kaiju/kaiju2table
  files:
    - path: output/kaiju/test.txt
      md5sum: 0d9f8fd36fcf2888296ae12632c5f0a8
    - path: output/kaiju/versions.yml

@@ -7,7 +7,7 @@ include { PICARD_COLLECTHSMETRICS } from '../../../../modules/picard/collecthsme

workflow test_picard_collecthsmetrics {

    input = [ [ id:'test', single_end:false ], // meta map
              file(params.test_data['sarscov2']['illumina']['test_paired_end_bam'], checkIfExists: true) ]

    fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
    fai = file(params.test_data['sarscov2']['genome']['genome_fasta_fai'], checkIfExists: true)

@@ -5,4 +5,4 @@
  - picard/collecthsmetrics
  files:
    # The file can't be md5'd consistently
-   - path: output/picard/test_collecthsmetrics.txt
+   - path: output/picard/test.CollectHsMetrics.coverage_metrics

@@ -6,7 +6,8 @@ include { SAMTOOLS_VIEW } from '../../../../modules/samtools/view/main.nf'

workflow test_samtools_view {

    input = [ [ id:'test', single_end:false ], // meta map
-             file(params.test_data['sarscov2']['illumina']['test_paired_end_bam'], checkIfExists: true)
+             file(params.test_data['sarscov2']['illumina']['test_paired_end_bam'], checkIfExists: true),
+             []
            ]

    SAMTOOLS_VIEW ( input, [] )
@@ -14,8 +15,8 @@ workflow test_samtools_view {

workflow test_samtools_view_cram {

    input = [ [ id: 'test' ], // meta map
              file(params.test_data['homo_sapiens']['illumina']['test_paired_end_recalibrated_sorted_cram'], checkIfExists: true),
              file(params.test_data['homo_sapiens']['illumina']['test_paired_end_recalibrated_sorted_cram_crai'], checkIfExists: true)
            ]

    fasta = file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true)

@@ -23,3 +23,8 @@ workflow test_stranger_without_optional_variant_catalog {
    EXPANSIONHUNTER ( input, fasta, variant_catalog )
    STRANGER ( EXPANSIONHUNTER.out.vcf, [] )
}
+
+workflow test_stranger_without_optional_variant_catalog_stubs {
+    EXPANSIONHUNTER ( input, fasta, variant_catalog )
+    STRANGER ( EXPANSIONHUNTER.out.vcf, [] )
+}

@@ -25,3 +25,13 @@
      md5sum: bbe15159195681d5c18596d3ad85c78f
    - path: output/stranger/versions.yml
      md5sum: 8558542a007e90ea5dcdceed3f12585d
+
+- name: stranger test_stranger_without_optional_variant_catalog_stubs
+  command: nextflow run tests/modules/stranger -entry test_stranger_without_optional_variant_catalog -c tests/config/nextflow.config -stub-run
+  tags:
+    - stranger
+  files:
+    - path: output/expansionhunter/test.vcf
+    - path: output/expansionhunter/versions.yml
+    - path: output/stranger/test.vcf.gz
+    - path: output/stranger/versions.yml