Merge branch 'nf-core:master' into master

commit c1d8dc4c15
Author: James A. Fellows Yates
Date: 2022-03-03 16:10:45 +01:00 (committed by GitHub)
30 changed files with 528 additions and 43 deletions


@ -11,9 +11,15 @@ process ADAPTERREMOVAL {
tuple val(meta), path(reads)
output:
tuple val(meta), path('*.fastq.gz'), emit: reads
tuple val(meta), path('*.log') , emit: log
path "versions.yml" , emit: versions
tuple val(meta), path('*.truncated.gz') , optional: true, emit: singles_truncated
tuple val(meta), path('*.discarded.gz') , optional: true, emit: discarded
tuple val(meta), path('*.pair1.truncated.gz') , optional: true, emit: pair1_truncated
tuple val(meta), path('*.pair2.truncated.gz') , optional: true, emit: pair2_truncated
tuple val(meta), path('*.collapsed.gz') , optional: true, emit: collapsed
tuple val(meta), path('*.collapsed.truncated') , optional: true, emit: collapsed_truncated
tuple val(meta), path('*paired.gz') , optional: true, emit: paired_interleaved
tuple val(meta), path('*.log') , emit: log
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
@ -28,30 +34,27 @@ process ADAPTERREMOVAL {
--file1 $reads \\
$args \\
--basename $prefix \\
--threads $task.cpus \\
--threads ${task.cpus} \\
--settings ${prefix}.log \\
--output1 ${prefix}.trimmed.fastq.gz \\
--seed 42 \\
--gzip \\
--gzip
cat <<-END_VERSIONS > versions.yml
"${task.process}":
adapterremoval: \$(AdapterRemoval --version 2>&1 | sed -e "s/AdapterRemoval ver. //g")
END_VERSIONS
"""
} else if (!meta.single_end && !meta.collapse) {
} else if (!meta.single_end ) {
"""
AdapterRemoval \\
--file1 ${reads[0]} \\
--file2 ${reads[1]} \\
$args \\
--basename $prefix \\
--threads $task.cpus \\
--threads ${task.cpus} \\
--settings ${prefix}.log \\
--output1 ${prefix}.pair1.trimmed.fastq.gz \\
--output2 ${prefix}.pair2.trimmed.fastq.gz \\
--seed 42 \\
--gzip \\
--gzip
cat <<-END_VERSIONS > versions.yml
"${task.process}":
@ -63,13 +66,12 @@ process ADAPTERREMOVAL {
AdapterRemoval \\
--file1 ${reads[0]} \\
--file2 ${reads[1]} \\
--collapse \\
$args \\
--basename $prefix \\
--threads $task.cpus \\
--settings ${prefix}.log \\
--seed 42 \\
--gzip \\
--gzip
cat *.collapsed.gz *.collapsed.truncated.gz > ${prefix}.merged.fastq.gz
cat <<-END_VERSIONS > versions.yml


@ -17,13 +17,13 @@ input:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false, collapse:false ]
e.g. [ id:'test', single_end:false ]
- reads:
type: file
description: |
List of input FastQ files of size 1 and 2 for single-end and paired-end data,
respectively.
pattern: "*.{fq,fastq,fg.gz,fastq.gz}"
pattern: "*.{fq,fastq,fq.gz,fastq.gz}"
output:
- meta:
@ -31,12 +31,45 @@ output:
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- reads:
- singles_truncated:
type: file
description: |
List of input adapter trimmed FastQ files of size 1 or 2 for
single-end or collapsed data and paired-end data, respectively.
pattern: "*.{fastq.gz}"
Adapter trimmed FastQ files of either single-end reads, or singleton
'orphaned' reads from merging of paired-end data (i.e., one of the pair
was lost due to filtering thresholds).
pattern: "*.truncated.gz"
- discarded:
type: file
description: |
Adapter trimmed FastQ files of reads that did not pass filtering
thresholds.
pattern: "*.discarded.gz"
- pair1_truncated:
type: file
description: |
Adapter trimmed R1 FastQ files of paired-end reads that did not merge
with their respective R2 pair due to long templates. The respective pair
is stored in 'pair2_truncated'.
pattern: "*.pair1.truncated.gz"
- pair2_truncated:
type: file
description: |
Adapter trimmed R2 FastQ files of paired-end reads that did not merge
with their respective R1 pair due to long templates. The respective pair
is stored in 'pair1_truncated'.
pattern: "*.pair2.truncated.gz"
- collapsed:
type: file
description: |
Collapsed FastQ of paired-end reads that successfully merged with their
respective R1 pair but were not trimmed.
pattern: "*.collapsed.gz"
- collapsed_truncated:
type: file
description: |
Collapsed FastQ of paired-end reads that successfully merged with their
respective R1 pair and were trimmed of adapter due to sufficient overlap.
pattern: "*.collapsed.truncated.gz"
- log:
type: file
description: AdapterRemoval log file
@ -48,3 +81,4 @@ output:
authors:
- "@maxibor"
- "@jfy133"


@ -17,8 +17,8 @@ process DEEPVARIANT {
path(fai)
output:
tuple val(meta), path("*.vcf.gz") , emit: vcf
tuple val(meta), path("*g.vcf.gz"), emit: gvcf
tuple val(meta), path("${prefix}.vcf.gz") , emit: vcf
tuple val(meta), path("${prefix}.g.vcf.gz"), emit: gvcf
path "versions.yml" , emit: versions
when:
@ -26,7 +26,7 @@ process DEEPVARIANT {
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
prefix = task.ext.prefix ?: "${meta.id}"
def regions = intervals ? "--regions ${intervals}" : ""
"""


@ -13,6 +13,7 @@ process FAQCS {
output:
tuple val(meta), path('*.trimmed.fastq.gz') , emit: reads
tuple val(meta), path('*.stats.txt') , emit: stats
tuple val(meta), path('*.txt') , optional:true, emit: txt
tuple val(meta), path('*_qc_report.pdf') , optional:true, emit: statspdf
tuple val(meta), path('*.log') , emit: log
tuple val(meta), path('*.discard.fastq.gz') , optional:true, emit: reads_fail


@ -54,6 +54,10 @@ output:
type: file
description: trimming/qc text stats file
pattern: "*.stats.txt"
- txt:
type: file
description: trimming/qc text files produced by the --debug option
pattern: "*.txt"
- statspdf:
type: file
description: trimming/qc pdf report file
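The extra .txt tables captured by the new txt channel only appear when FaQCs runs with --debug; this is passed through task.ext.args, as in the updated test config further down. A minimal sketch (assumed process selector):

process {
    withName: 'FAQCS' {
        ext.args = '--debug'  // emit the additional per-sample .txt QC tables
    }
}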


@ -0,0 +1,44 @@
process HAMRONIZATION_DEEPARG {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::hamronization=1.0.3" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/hamronization:1.0.3--py_0':
'quay.io/biocontainers/hamronization:1.0.3--py_0' }"
input:
tuple val(meta), path(report)
val(format)
val(software_version)
val(reference_db_version)
output:
tuple val(meta), path("*.json"), optional: true, emit: json
tuple val(meta), path("*.tsv") , optional: true, emit: tsv
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
hamronize \\
deeparg \\
${report} \\
$args \\
--format ${format} \\
--analysis_software_version ${software_version} \\
--reference_database_version ${reference_db_version} \\
--input_file_name ${prefix} \\
> ${prefix}.${format}
cat <<-END_VERSIONS > versions.yml
"${task.process}":
hamronization: \$(echo \$(hamronize --version 2>&1) | cut -f 2 -d ' ' )
END_VERSIONS
"""
}


@ -0,0 +1,60 @@
name: hamronization_deeparg
description: Tool to convert and summarize DeepARG outputs using the hAMRonization specification
keywords:
- amr
- antimicrobial resistance
- reporting
- deeparg
tools:
- hamronization:
description: Tool to convert and summarize AMR gene detection outputs using the hAMRonization specification
homepage: https://github.com/pha4ge/hAMRonization/blob/master/README.md
documentation: https://github.com/pha4ge/hAMRonization/blob/master/README.md
tool_dev_url: https://github.com/pha4ge/hAMRonization
doi: ""
licence: ['GNU Lesser General Public v3 (LGPL v3)']
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- report:
type: file
description: Output .mapping.ARG file from DeepARG
pattern: "*.mapping.ARG"
- format:
type: value
description: Type of report file to be produced
pattern: "tsv|json"
- software_version:
type: value
description: Version of DeepARG used
pattern: "[0-9].[0-9].[0-9]"
- reference_db_version:
type: value
description: Database version of DeepARG used
pattern: "[0-9]"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- json:
type: file
description: hAMRonised report in JSON format
pattern: "*.json"
- tsv:
type: file
description: hAMRonised report in TSV format
pattern: "*.json"
authors:
- "@jfy133"


@ -0,0 +1,38 @@
process HAMRONIZATION_SUMMARIZE {
label 'process_low'
conda (params.enable_conda ? "bioconda::hamronization=1.0.3" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/hamronization:1.0.3--py_0':
'quay.io/biocontainers/hamronization:1.0.3--py_0' }"
input:
path(reports)
val(format)
output:
path("hamronization_combined_report.json"), optional: true, emit: json
path("hamronization_combined_report.tsv") , optional: true, emit: tsv
path("hamronization_combined_report.html"), optional: true, emit: html
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def outformat = format == 'interactive' ? 'html' : format
"""
hamronize \\
summarize \\
${reports.join(' ')} \\
-t ${format} \\
$args \\
-o hamronization_combined_report.${outformat}
cat <<-END_VERSIONS > versions.yml
"${task.process}":
hamronization: \$(echo \$(hamronize --version 2>&1) | cut -f 2 -d ' ' )
END_VERSIONS
"""
}


@ -0,0 +1,45 @@
name: hamronization_summarize
description: Tool to summarize and combine all hAMRonization reports into a single file
keywords:
- amr
- antimicrobial resistance
- reporting
tools:
- hamronization:
description: Tool to convert and summarize AMR gene detection outputs using the hAMRonization specification
homepage: https://github.com/pha4ge/hAMRonization/blob/master/README.md
documentation: https://github.com/pha4ge/hAMRonization/blob/master/README.md
tool_dev_url: https://github.com/pha4ge/hAMRonization
doi: ""
licence: ['GNU Lesser General Public v3 (LGPL v3)']
input:
- reports:
type: file
description: List of multiple hAMRonization reports in either JSON or TSV format
pattern: "*.{json,tsv}"
- format:
type: value
description: Type of final combined report file to be produced
pattern: "tsv|json|interactive"
output:
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- json:
type: file
description: hAMRonised summary in JSON format
pattern: "*.json"
- tsv:
type: file
description: hAMRonised summary in TSV format
pattern: "*.json"
- html:
type: file
description: hAMRonised summary in HTML format
pattern: "*.html"
authors:
- "@jfy133"


@ -23,11 +23,13 @@ process PLINK2_EXTRACT {
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
if( "$pgen" == "${prefix}.pgen" ) error "Input and output names are the same, use \"task.ext.prefix\" in modules.config to disambiguate!"
def mem_mb = task.memory.toMega()
"""
plink2 \\
--threads $task.cpus \\
--memory $mem_mb \\
--pfile ${pgen.baseName} \\
$args \\
--threads $task.cpus \\
--extract $variants \\
--make-pgen vzs \\
--out ${prefix}


@ -11,10 +11,10 @@ process PLINK2_VCF {
tuple val(meta), path(vcf)
output:
tuple val(meta), path("*.pgen"), emit: pgen
tuple val(meta), path("*.psam"), emit: psam
tuple val(meta), path("*.pvar"), emit: pvar
path "versions.yml" , emit: versions
tuple val(meta), path("*.pgen") , emit: pgen
tuple val(meta), path("*.psam") , emit: psam
tuple val(meta), path("*.pvar.zst"), emit: pvar
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
@ -22,10 +22,14 @@ process PLINK2_VCF {
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def mem_mb = task.memory.toMega()
"""
plink2 \\
--threads $task.cpus \\
--memory $mem_mb \\
$args \\
--vcf $vcf \\
--make-pgen vzs \\
--out ${prefix}
cat <<-END_VERSIONS > versions.yml


@ -46,7 +46,7 @@ output:
- pvar:
type: file
description: PLINK 2 variant information file
pattern: "*.{psam}"
pattern: "*.{pvar.zst}"
authors:
- "@nebfield"

modules/stranger/main.nf (new file)

@ -0,0 +1,33 @@
process STRANGER {
tag "$meta.id"
label 'process_low'
conda (params.enable_conda ? "bioconda::stranger=0.8.1" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/stranger:0.8.1--pyh5e36f6f_0':
'quay.io/biocontainers/stranger:0.8.1--pyh5e36f6f_0' }"
input:
tuple val(meta), path(vcf)
output:
tuple val(meta), path("*.gz"), emit: vcf
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
stranger \\
$args \\
$vcf | gzip --no-name > ${prefix}.vcf.gz
cat <<-END_VERSIONS > versions.yml
"${task.process}":
stranger: \$( stranger --version )
END_VERSIONS
"""
}

modules/stranger/meta.yml (new file)

@ -0,0 +1,44 @@
name: stranger
description: Annotates output files from ExpansionHunter with the pathologic implications of the repeat sizes.
keywords:
- STR
- repeat_expansions
- annotate
- vcf
tools:
- stranger:
description: Annotate VCF files with str variants
homepage: https://github.com/moonso/stranger
documentation: https://github.com/moonso/stranger
tool_dev_url: https://github.com/moonso/stranger
doi: "10.5281/zenodo.4548873"
licence: ['MIT']
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- vcf:
type: file
description: VCF with repeat expansions
pattern: "*.{vcf.gz,vcf}"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- vcf:
type: file
description: annotated VCF with keys STR_STATUS, NormalMax and PathologicMin
pattern: "*.{vcf.gz}"
authors:
- "@ljmesi"


@ -2,7 +2,7 @@ process UNTAR {
tag "$archive"
label 'process_low'
conda (params.enable_conda ? "conda-forge::sed=4.7" : null)
conda (params.enable_conda ? "conda-forge::tar=1.32" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://containers.biocontainers.pro/s3/SingImgsRepo/biocontainers/v1.2.0_cv1/biocontainers_v1.2.0_cv1.img' :
'biocontainers/biocontainers:v1.2.0_cv1' }"


@ -752,6 +752,14 @@ gunzip:
- modules/gunzip/**
- tests/modules/gunzip/**
hamronization/deeparg:
- modules/hamronization/deeparg/**
- tests/modules/hamronization/deeparg/**
hamronization/summarize:
- modules/hamronization/summarize/**
- tests/modules/hamronization/summarize/**
hicap:
- modules/hicap/**
- tests/modules/hicap/**
@ -1565,6 +1573,10 @@ star/genomegenerate:
- modules/star/genomegenerate/**
- tests/modules/star/genomegenerate/**
stranger:
- modules/stranger/**
- tests/modules/stranger/**
strelka/germline:
- modules/strelka/germline/**
- tests/modules/strelka/germline/**


@ -68,7 +68,7 @@ params {
test_computematrix_mat_gz = "${test_data_dir}/genomics/sarscov2/illumina/deeptools/test.computeMatrix.mat.gz"
test_bcf = "${test_data_dir}/genomics/sarscov2/illumina/vcf/test.bcf"
test_vcf = "${test_data_dir}/genomics/sarscov2/illumina/vcf/test.vcf"
test_vcf_gz = "${test_data_dir}/genomics/sarscov2/illumina/vcf/test.vcf.gz"
test_vcf_gz_tbi = "${test_data_dir}/genomics/sarscov2/illumina/vcf/test.vcf.gz.tbi"
@ -321,6 +321,8 @@ params {
'genome' {
genome_fna_gz = "${test_data_dir}/genomics/prokaryotes/bacteroides_fragilis/genome/genome.fna.gz"
genome_paf = "${test_data_dir}/genomics/prokaryotes/bacteroides_fragilis/genome/genome.paf"
genome_mapping_potential_arg = "${test_data_dir}/genomics/prokaryotes/bacteroides_fragilis/genome/genome.mapping.potential.ARG"
}
'illumina' {
test1_contigs_fa_gz = "${test_data_dir}/genomics/prokaryotes/bacteroides_fragilis/illumina/fasta/test1.contigs.fa.gz"


@ -1,31 +1,44 @@
- name: adapterremoval test_adapterremoval_single_end
command: nextflow run ./tests/modules/adapterremoval -entry test_adapterremoval_single_end -c ./tests/config/nextflow.config -c ./tests/modules/adapterremoval/nextflow.config
command: nextflow run tests/modules/adapterremoval -entry test_adapterremoval_single_end -c tests/config/nextflow.config
tags:
- adapterremoval
files:
- path: output/adapterremoval/test.discarded.gz
- path: output/adapterremoval/test.log
md5sum: 2fd3d5d703b63ba33a83021fccf25f77
- path: output/adapterremoval/test.trimmed.fastq.gz
- path: output/adapterremoval/test.truncated.gz
md5sum: 62139afee94defad5b83bdd0b8475a1f
- path: output/adapterremoval/versions.yml
md5sum: ac5b46719719b7ee62739530b80869fc
- name: adapterremoval test_adapterremoval_paired_end
command: nextflow run ./tests/modules/adapterremoval -entry test_adapterremoval_paired_end -c ./tests/config/nextflow.config -c ./tests/modules/adapterremoval/nextflow.config
command: nextflow run tests/modules/adapterremoval -entry test_adapterremoval_paired_end -c tests/config/nextflow.config
tags:
- adapterremoval
files:
- path: output/adapterremoval/test.discarded.gz
- path: output/adapterremoval/test.log
md5sum: b8a451d3981b327f3fdb44f40ba2d6d1
- path: output/adapterremoval/test.pair1.trimmed.fastq.gz
- path: output/adapterremoval/test.pair1.truncated.gz
md5sum: 294a6277f0139bd597e57c6fa31f39c7
- path: output/adapterremoval/test.pair2.trimmed.fastq.gz
- path: output/adapterremoval/test.pair2.truncated.gz
md5sum: de7b38e2c881bced8671acb1ab452d78
- path: output/adapterremoval/test.singleton.truncated.gz
- path: output/adapterremoval/versions.yml
md5sum: fa621c887897da5a379c719399c17db7
- name: adapterremoval test_adapterremoval_paired_end_collapse
command: nextflow run ./tests/modules/adapterremoval -entry test_adapterremoval_paired_end_collapse -c ./tests/config/nextflow.config -c ./tests/modules/adapterremoval/nextflow.config
command: nextflow run tests/modules/adapterremoval -entry test_adapterremoval_paired_end_collapse -c tests/config/nextflow.config
tags:
- adapterremoval
files:
- path: output/adapterremoval/test.discarded.gz
- path: output/adapterremoval/test.log
md5sum: 7f0b2328152226e46101a535cce718b3
- path: output/adapterremoval/test.merged.fastq.gz
md5sum: 07a8f725bfd3ecbeabdc41b32d898dee
md5sum: b8a451d3981b327f3fdb44f40ba2d6d1
- path: output/adapterremoval/test.pair1.truncated.gz
md5sum: 294a6277f0139bd597e57c6fa31f39c7
- path: output/adapterremoval/test.pair2.truncated.gz
md5sum: de7b38e2c881bced8671acb1ab452d78
- path: output/adapterremoval/test.singleton.truncated.gz
- path: output/adapterremoval/versions.yml
md5sum: fd428f92a8446e0b34c5ae1c447215b8


@ -1,5 +1,6 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
ext.args = {"--debug" }
}


@ -3,8 +3,20 @@
tags:
- faqcs
files:
- path: output/faqcs/qa.test.base_content.txt
md5sum: f992603f01ca430c03c8aae02eba2f5d
- path: output/faqcs/qa.test.for_qual_histogram.txt
md5sum: a3d462ab84151e982f99f85f52c21de3
- path: output/faqcs/qa.test.length_count.txt
md5sum: 80915f09fbaf5884c32e95acab2d031c
- path: output/faqcs/test.base_content.txt
md5sum: f992603f01ca430c03c8aae02eba2f5d
- path: output/faqcs/test.fastp.log
md5sum: be79dc893f87de1f82faf749cdfb848c
- path: output/faqcs/test.for_qual_histogram.txt
md5sum: a3d462ab84151e982f99f85f52c21de3
- path: output/faqcs/test.length_count.txt
md5sum: 80915f09fbaf5884c32e95acab2d031c
- path: output/faqcs/test.stats.txt
md5sum: ea20e93706b2e4c676004253baa3cec6
- path: output/faqcs/test.trimmed.fastq.gz
@ -18,8 +30,20 @@
tags:
- faqcs
files:
- path: output/faqcs/qa.test.base_content.txt
md5sum: 99aa9a775ccd8d6503f0cf80f775203c
- path: output/faqcs/qa.test.for_qual_histogram.txt
md5sum: 4f4b131be5425bdfa4b3237e44fa7d48
- path: output/faqcs/qa.test.length_count.txt
md5sum: 420298983c762754d5b0ef32c9d5dad4
- path: output/faqcs/test.base_content.txt
md5sum: 99aa9a775ccd8d6503f0cf80f775203c
- path: output/faqcs/test.fastp.log
md5sum: be79dc893f87de1f82faf749cdfb848c
- path: output/faqcs/test.for_qual_histogram.txt
md5sum: 4f4b131be5425bdfa4b3237e44fa7d48
- path: output/faqcs/test.length_count.txt
md5sum: 420298983c762754d5b0ef32c9d5dad4
- path: output/faqcs/test.stats.txt
md5sum: 9a693f8af94ab8c485519d9a523aa622
- path: output/faqcs/test_1.trimmed.fastq.gz


@ -0,0 +1,15 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { HAMRONIZATION_DEEPARG } from '../../../../modules/hamronization/deeparg/main.nf'
workflow test_hamronization_deeparg {
input = [
[ id:'test', single_end:false ], // meta map
file(params.test_data['bacteroides_fragilis']['genome']['genome_mapping_potential_arg'], checkIfExists: true),
]
HAMRONIZATION_DEEPARG ( input, 'tsv', '1.0.2', '2' )
}


@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}


@ -0,0 +1,8 @@
- name: hamronization deeparg test_hamronization_deeparg
command: nextflow run tests/modules/hamronization/deeparg -entry test_hamronization_deeparg -c tests/config/nextflow.config
tags:
- hamronization
- hamronization/deeparg
files:
- path: output/hamronization/test.tsv
md5sum: 3c315605aca0c5964796bb5fd4cdd522


@ -0,0 +1,36 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { HAMRONIZATION_DEEPARG } from '../../../../modules/hamronization/deeparg/main.nf'
include { HAMRONIZATION_DEEPARG as HAMRONIZATION_DEEPARG_SECOND } from '../../../../modules/hamronization/deeparg/main.nf'
include { HAMRONIZATION_SUMMARIZE } from '../../../../modules/hamronization/summarize/main.nf'
workflow test_hamronization_summarize {
input = [
[ id:'test', single_end:false ], // meta map
file(params.test_data['bacteroides_fragilis']['genome']['genome_mapping_potential_arg'], checkIfExists: true),
]
input2 = [
[ id:'test2', single_end:false ], // meta map
file(params.test_data['bacteroides_fragilis']['genome']['genome_mapping_potential_arg'], checkIfExists: true),
]
HAMRONIZATION_DEEPARG ( input, 'tsv', '1.0.2', '2' )
HAMRONIZATION_DEEPARG_SECOND ( input2, 'tsv', '1.0.2', '2' )
ch_deeparg_run_one = HAMRONIZATION_DEEPARG.out.tsv
ch_deeparg_run_two = HAMRONIZATION_DEEPARG_SECOND.out.tsv
ch_deeparg_run_one
.mix( ch_deeparg_run_two )
.map{
[ it[1] ]
}
.collect()
.set { ch_input_for_summarize }
HAMRONIZATION_SUMMARIZE ( ch_input_for_summarize , 'json' )
}


@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}


@ -0,0 +1,14 @@
- name: hamronization summarize test_hamronization_summarize
command: nextflow run tests/modules/hamronization/summarize -entry test_hamronization_summarize -c tests/config/nextflow.config
tags:
- hamronization
- hamronization/summarize
files:
- path: output/hamronization/hamronization_combined_report.json
md5sum: 1623b6cc3b213208a425e023edd94691
- path: output/hamronization/test.tsv
md5sum: 3c315605aca0c5964796bb5fd4cdd522
- path: output/hamronization/test2.tsv
md5sum: 453f38502e35261a50a0849dca34f05b
- path: output/hamronization/versions.yml
md5sum: 99b5046fac643e16ca3362d1baf3284b


@ -1,12 +1,14 @@
- name: plink2 vcf test_plink2_vcf
command: nextflow run ./tests/modules/plink2/vcf -entry test_plink2_vcf -c ./tests/config/nextflow.config -c ./tests/modules/plink2/vcf/nextflow.config
command: nextflow run tests/modules/plink2/vcf -entry test_plink2_vcf -c tests/config/nextflow.config
tags:
- plink2/vcf
- plink2
- plink2/vcf
files:
- path: output/plink2/test.pgen
md5sum: d66d3cd4a6c9cca1a4073d7f4b277041
- path: output/plink2/test.psam
md5sum: dc3b77d7753a7bed41734323e3549b10
- path: output/plink2/test.pvar
md5sum: d61e53f847a6335138b584216b4e45d0
- path: output/plink2/test.pvar.zst
md5sum: b53cccb83e024a39789af5eab8de1c28
- path: output/plink2/versions.yml
md5sum: 82ada74bc81473b7cba377f696acf54c


@ -0,0 +1,19 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { EXPANSIONHUNTER } from '../../../modules/expansionhunter/main.nf'
include { STRANGER } from '../../../modules/stranger/main.nf'
workflow test_stranger {
input = [ [ id:'test', gender:'male' ], // meta map
file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_bam'], checkIfExists: true),
file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_bam_bai'], checkIfExists: true),
]
fasta = file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true)
variant_catalog = file(params.test_data['homo_sapiens']['genome']['repeat_expansions'], checkIfExists: true)
EXPANSIONHUNTER ( input, fasta, variant_catalog )
STRANGER ( EXPANSIONHUNTER.out.vcf )
}


@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}


@ -0,0 +1,13 @@
- name: stranger test_stranger
command: nextflow run tests/modules/stranger -entry test_stranger -c tests/config/nextflow.config
tags:
- stranger
files:
- path: output/expansionhunter/test.vcf
md5sum: cfd4a1d35c0e469b99eb6aaa6d22de76
- path: output/expansionhunter/versions.yml
md5sum: f3962a6eecfddf9682414c0f605a885a
- path: output/stranger/test.vcf.gz
md5sum: bbe15159195681d5c18596d3ad85c78f
- path: output/stranger/versions.yml
md5sum: 5ec35fd835fb1be50bc3e7c004310fc0