Merge branch 'nf-core:master' into master

commit 23613051ca
James A. Fellows Yates, 2022-03-18 08:14:36 +01:00 (committed via GitHub)
86 changed files with 2541 additions and 106 deletions


@@ -86,17 +86,13 @@ jobs:
       # Test the module
       - name: Run pytest-workflow
         # only use one thread for pytest-workflow to avoid race condition on conda cache.
-        run: TMPDIR=~ PROFILE=${{ matrix.profile }} pytest --tag ${{ matrix.tags }} --symlink --kwdof --git-aware
+        run: TMPDIR=~ PROFILE=${{ matrix.profile }} pytest --tag ${{ matrix.tags }} --symlink --kwdof --git-aware --color=yes

      - name: Output log on failure
        if: failure()
        run: |
-          echo "======> log.out <======="
-          cat /home/runner/pytest_workflow_*/*/log.out
-          echo
-          echo
-          echo "======> log.err <======="
-          cat /home/runner/pytest_workflow_*/*/log.err
+          sudo apt install bat > /dev/null
+          batcat --decorations=always --color=always /home/runner/pytest_workflow_*/*/log.{out,err}

      - name: Upload logs on failure
        if: failure()

modules/adapterremoval/main.nf

@@ -9,15 +9,16 @@ process ADAPTERREMOVAL {
     input:
     tuple val(meta), path(reads)
+    path(adapterlist)

     output:
-    tuple val(meta), path('*.truncated.gz')            , optional: true, emit: singles_truncated
-    tuple val(meta), path('*.discarded.gz')            , optional: true, emit: discarded
-    tuple val(meta), path('*.pair1.truncated.gz')      , optional: true, emit: pair1_truncated
-    tuple val(meta), path('*.pair2.truncated.gz')      , optional: true, emit: pair2_truncated
-    tuple val(meta), path('*.collapsed.gz')            , optional: true, emit: collapsed
-    tuple val(meta), path('*.collapsed.truncated')     , optional: true, emit: collapsed_truncated
-    tuple val(meta), path('*paired.gz')                , optional: true, emit: paired_interleaved
+    tuple val(meta), path("${prefix}.truncated.gz")           , optional: true, emit: singles_truncated
+    tuple val(meta), path("${prefix}.discarded.gz")           , optional: true, emit: discarded
+    tuple val(meta), path("${prefix}.pair1.truncated.gz")     , optional: true, emit: pair1_truncated
+    tuple val(meta), path("${prefix}.pair2.truncated.gz")     , optional: true, emit: pair2_truncated
+    tuple val(meta), path("${prefix}.collapsed.gz")           , optional: true, emit: collapsed
+    tuple val(meta), path("${prefix}.collapsed.truncated.gz") , optional: true, emit: collapsed_truncated
+    tuple val(meta), path("${prefix}.paired.gz")              , optional: true, emit: paired_interleaved
     tuple val(meta), path('*.log')                            , emit: log
     path "versions.yml"                                       , emit: versions
@@ -26,31 +27,16 @@ process ADAPTERREMOVAL {
     script:
     def args = task.ext.args ?: ''
-    def prefix = task.ext.prefix ?: "${meta.id}"
+    def list = adapterlist ? "--adapter-list ${adapterlist}" : ""
+    prefix = task.ext.prefix ?: "${meta.id}"
     if (meta.single_end) {
         """
         AdapterRemoval \\
            --file1 $reads \\
            $args \\
-            --basename $prefix \\
-            --threads ${task.cpus} \\
-            --settings ${prefix}.log \\
-            --seed 42 \\
-            --gzip
-        cat <<-END_VERSIONS > versions.yml
-        "${task.process}":
-            adapterremoval: \$(AdapterRemoval --version 2>&1 | sed -e "s/AdapterRemoval ver. //g")
-        END_VERSIONS
-        """
-    } else if (!meta.single_end ) {
-        """
-        AdapterRemoval \\
-            --file1 ${reads[0]} \\
-            --file2 ${reads[1]} \\
-            $args \\
-            --basename $prefix \\
+            $adapterlist \\
+            --basename ${prefix} \\
            --threads ${task.cpus} \\
            --settings ${prefix}.log \\
            --seed 42 \\

@@ -67,13 +53,13 @@ process ADAPTERREMOVAL {
            --file1 ${reads[0]} \\
            --file2 ${reads[1]} \\
            $args \\
-            --basename $prefix \\
+            $adapterlist \\
+            --basename ${prefix} \\
            --threads $task.cpus \\
            --settings ${prefix}.log \\
            --seed 42 \\
            --gzip
-        cat *.collapsed.gz *.collapsed.truncated.gz > ${prefix}.merged.fastq.gz

        cat <<-END_VERSIONS > versions.yml
        "${task.process}":
            adapterremoval: \$(AdapterRemoval --version 2>&1 | sed -e "s/AdapterRemoval ver. //g")

modules/adapterremoval/meta.yml

@@ -24,6 +24,13 @@ input:
        List of input FastQ files of size 1 and 2 for single-end and paired-end data,
        respectively.
      pattern: "*.{fq,fastq,fq.gz,fastq.gz}"
+  - adapterlist:
+      type: file
+      description:
+        Optional text file containing list of adapters to look for for removal
+        with one adapter per line. Otherwise will look for default adapters (see
+        AdapterRemoval man page), or can be modified to remove user-specified
+        adapters via ext.args.
 output:
   - meta:

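The new adapterlist input and the ext.args hook mentioned above are supplied from pipeline configuration rather than by the module itself. A minimal sketch of how a pipeline might use them; the selector and flag shown are illustrative, not part of this commit:

```
// Hypothetical modules.config snippet: extra AdapterRemoval flags go in via ext.args,
// while the adapter list file is passed as the module's second input channel.
process {
    withName: ADAPTERREMOVAL {
        ext.args = '--minlength 30'   // illustrative flag only
    }
}
```

In a test or subworkflow the list is then passed positionally, e.g. `ADAPTERREMOVAL ( reads, file('adapters.txt') )`, or `[]` when no list is used, as in the updated tests further down.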
modules/ascat/main.nf (new file)

@@ -0,0 +1,155 @@
process ASCAT {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::ascat=3.0.0 bioconda::cancerit-allelecount-4.3.0": null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/mulled-v2-c278c7398beb73294d78639a864352abef2931ce:dfe5aaa885de434adb2b490b68972c5840c6d761-0':
'quay.io/biocontainers/mulled-v2-c278c7398beb73294d78639a864352abef2931ce:dfe5aaa885de434adb2b490b68972c5840c6d761-0' }"
input:
tuple val(meta), path(input_normal), path(index_normal), path(input_tumor), path(index_tumor)
path(allele_files)
path(loci_files)
output:
tuple val(meta), path("*png"), emit: png
tuple val(meta), path("*cnvs.txt"), emit: cnvs
tuple val(meta), path("*purityploidy.txt"), emit: purityploidy
tuple val(meta), path("*segments.txt"), emit: segments
path "versions.yml", emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def gender = args.gender ? "$args.gender" : "NULL"
def genomeVersion = args.genomeVersion ? "$args.genomeVersion" : "NULL"
def purity = args.purity ? "$args.purity" : "NULL"
def ploidy = args.ploidy ? "$args.ploidy" : "NULL"
def gc_files = args.gc_files ? "$args.gc_files" : "NULL"
def minCounts_arg = args.minCounts ? ",minCounts = $args.minCounts" : ""
def chrom_names_arg = args.chrom_names ? ",chrom_names = $args.chrom_names" : ""
def min_base_qual_arg = args.min_base_qual ? ",min_base_qual = $args.min_base_qual" : ""
def min_map_qual_arg = args.min_map_qual ? ",min_map_qual = $args.min_map_qual" : ""
def ref_fasta_arg = args.ref_fasta ? ",ref.fasta = '$args.ref_fasta'" : ""
def skip_allele_counting_tumour_arg = args.skip_allele_counting_tumour ? ",skip_allele_counting_tumour = $args.skip_allele_counting_tumour" : ""
def skip_allele_counting_normal_arg = args.skip_allele_counting_normal ? ",skip_allele_counting_normal = $args.skip_allele_counting_normal" : ""
"""
#!/usr/bin/env Rscript
library(RColorBrewer)
library(ASCAT)
options(bitmapType='cairo')
#prepare from BAM files
ascat.prepareHTS(
tumourseqfile = "$input_tumor",
normalseqfile = "$input_normal",
tumourname = "Tumour",
normalname = "Normal",
allelecounter_exe = "alleleCounter",
alleles.prefix = "$allele_files",
loci.prefix = "$loci_files",
gender = "$gender",
genomeVersion = "$genomeVersion",
nthreads = $task.cpus
$minCounts_arg
$chrom_names_arg
$min_base_qual_arg
$min_map_qual_arg
$ref_fasta_arg
$skip_allele_counting_tumour_arg
$skip_allele_counting_normal_arg
)
#Load the data
ascat.bc = ascat.loadData(
Tumor_LogR_file = "Tumour_tumourLogR.txt",
Tumor_BAF_file = "Tumour_normalBAF.txt",
Germline_LogR_file = "Tumour_normalLogR.txt",
Germline_BAF_file = "Tumour_normalBAF.txt",
genomeVersion = "$genomeVersion",
gender = "$gender"
)
#optional GC wave correction
if(!is.null($gc_files)){
ascat.bc = ascat.GCcorrect(ascat.bc, $gc_files)
}
#Plot the raw data
ascat.plotRawData(ascat.bc)
#Segment the data
ascat.bc = ascat.aspcf(ascat.bc)
#Plot the segmented data
ascat.plotSegmentedData(ascat.bc)
#Run ASCAT to fit every tumor to a model, inferring ploidy, normal cell contamination, and discrete copy numbers
#If psi and rho are manually set:
if (!is.null($purity) && !is.null($ploidy)){
ascat.output <- ascat.runAscat(ascat.bc, gamma=1, rho_manual=$purity, psi_manual=$ploidy)
} else if(!is.null($purity) && is.null($ploidy)){
ascat.output <- ascat.runAscat(ascat.bc, gamma=1, rho_manual=$purity)
} else if(!is.null($ploidy) && is.null($purity)){
ascat.output <- ascat.runAscat(ascat.bc, gamma=1, psi_manual=$ploidy)
} else {
ascat.output <- ascat.runAscat(ascat.bc, gamma=1)
}
#Write out segmented regions (including regions with one copy of each allele)
write.table(ascat.output[["segments"]], file=paste0("$prefix", ".segments.txt"), sep="\t", quote=F, row.names=F)
#Write out CNVs in bed format
cnvs=ascat.output[["segments"]][2:6]
write.table(cnvs, file=paste0("$prefix",".cnvs.txt"), sep="\t", quote=F, row.names=F, col.names=T)
#Write out purity and ploidy info
summary <- tryCatch({
matrix(c(ascat.output[["aberrantcellfraction"]], ascat.output[["ploidy"]]), ncol=2, byrow=TRUE)}, error = function(err) {
# error handler picks up where error was generated
print(paste("Could not find optimal solution: ",err))
return(matrix(c(0,0),nrow=1,ncol=2,byrow = TRUE))
}
)
colnames(summary) <- c("AberrantCellFraction","Ploidy")
write.table(summary, file=paste0("$prefix",".purityploidy.txt"), sep="\t", quote=F, row.names=F, col.names=T)
#version export. Have to hardcode process name and software name because
#won't run inside an R-block
version_file_path="versions.yml"
f <- file(version_file_path,"w")
writeLines("ASCAT:", f)
writeLines(" ascat: 3.0.0",f)
close(f)
"""
stub:
def prefix = task.ext.prefix ?: "${meta.id}"
"""
echo stub > ${prefix}.cnvs.txt
echo stub > ${prefix}.purityploidy.txt
echo stub > ${prefix}.segments.txt
echo stub > Tumour.ASCATprofile.png
echo stub > Tumour.ASPCF.png
echo stub > Tumour.germline.png
echo stub > Tumour.rawprofile.png
echo stub > Tumour.sunrise.png
echo stub > Tumour.tumour.png
echo 'ASCAT:' > versions.yml
echo ' ascat: 3.0.0' >> versions.yml
"""
}

modules/ascat/meta.yml (new file)

@@ -0,0 +1,92 @@
name: ascat
description: copy number profiles of tumour cells.
keywords:
- sort
tools:
- ascat:
description: ASCAT is a method to derive copy number profiles of tumour cells, accounting for normal cell admixture and tumour aneuploidy. ASCAT infers tumour purity (the fraction of tumour cells) and ploidy (the amount of DNA per tumour cell), expressed as multiples of haploid genomes from SNP array or massively parallel sequencing data, and calculates whole-genome allele-specific copy number profiles (the number of copies of both parental alleles for all SNP loci across the genome).
homepage: None
documentation: None
tool_dev_url: https://github.com/Crick-CancerGenomics/ascat
doi: "10.1093/bioinformatics/btaa538"
licence: ['GPL v3']
input:
- args:
type: map
description: |
Groovy Map containing tool parameters. MUST follow the structure/keywords below and be provided via modules.config. Parameters must be set between quotes. (optional) parameters can be removed from the map, if they are not set. For default values, please check the documentation above.
```
{
[
"gender": "XX",
"genomeVersion": "hg19"
"purity": (optional),
"ploidy": (optional),
"gc_files": (optional),
"minCounts": (optional),
"chrom_names": (optional),
"min_base_qual": (optional),
"min_map_qual": (optional),
"ref_fasta": (optional),
"skip_allele_counting_tumour": (optional),
"skip_allele_counting_normal": (optional)
]
}
```
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- input_normal:
type: file
description: BAM/CRAM/SAM file
pattern: "*.{bam,cram,sam}"
- index_normal:
type: file
description: index for normal_bam
pattern: "*.{bai}"
- input_tumor:
type: file
description: BAM/CRAM/SAM file
pattern: "*.{bam,cram,sam}"
- index_tumor:
type: file
description: index for tumor_bam
pattern: "*.{bai}"
- allele_files:
type: file
description: allele files for ASCAT. Can be downloaded here https://github.com/VanLoo-lab/ascat/tree/master/ReferenceFiles/WGS
- loci_files:
type: file
description: loci files for ASCAT. Can be downloaded here https://github.com/VanLoo-lab/ascat/tree/master/ReferenceFiles/WGS
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- png:
type: file
description: ASCAT plots
pattern: "*.{png}"
- purityploidy:
type: file
description: purity and ploidy data
pattern: "*.purityploidy.txt"
- segments:
type: file
description: segments data
pattern: "*.segments.txt"
authors:
- "@aasNGC"
- "@lassefolkersen"
- "@FriederikeHanssen"
- "@maxulysse"

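The args map documented in the meta.yml above is read by the module as task.ext.args (e.g. `args.gender`, `args.genomeVersion`), so it would normally come from a pipeline's modules.config. A minimal, hedged sketch using only the two example keys shown above; the values are illustrative:

```
// Hypothetical modules.config entry; keys follow the args map documented in modules/ascat/meta.yml.
process {
    withName: ASCAT {
        ext.args = [
            "gender"        : "XX",
            "genomeVersion" : "hg19"
        ]
    }
}
```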
modules/biscuit/align/main.nf (new file)

@@ -0,0 +1,44 @@
process BISCUIT_ALIGN {
tag "$meta.id"
label 'process_high'
conda (params.enable_conda ? "bioconda::biscuit=1.0.2.20220113 bioconda::samtools=1.15" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/mulled-v2-db16f1c237a26ea9245cf9924f858974ff321d6e:17fa66297f088a1bc7560b7b90dc273bf23f2d8c-0':
'quay.io/biocontainers/mulled-v2-db16f1c237a26ea9245cf9924f858974ff321d6e:17fa66297f088a1bc7560b7b90dc273bf23f2d8c-0' }"
input:
tuple val(meta), path(reads)
path index
output:
tuple val(meta), path("*.bam"), emit: bam
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def args2 = task.ext.args2 ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def biscuit_cpus = (int) Math.max(Math.floor(task.cpus*0.9),1)
def samtools_cpus = task.cpus-biscuit_cpus
"""
INDEX=`find -L ./ -name "*.bis.amb" | sed 's/.bis.amb//'`
biscuit align \\
$args \\
-@ $biscuit_cpus \\
\$INDEX \\
$reads \\
| samtools sort $args2 --threads $samtools_cpus -o ${prefix}.bam -
cat <<-END_VERSIONS > versions.yml
"${task.process}":
biscuit: \$( biscuit version |& sed '1!d; s/^.*BISCUIT Version: //' )
samtools: \$( samtools --version |& sed '1!d; s/^.*samtools //' )
END_VERSIONS
"""
}

modules/biscuit/align/meta.yml (new file)

@@ -0,0 +1,52 @@
name: biscuit_align
description: Aligns single- or paired-end reads from bisulfite-converted libraries to a reference genome using Biscuit.
keywords:
- biscuit
- DNA methylation
- WGBS
- scWGBS
- bisulfite sequencing
- aligner
- bam
tools:
- biscuit:
description: A utility for analyzing sodium bisulfite conversion-based DNA methylation/modification data
homepage: https://huishenlab.github.io/biscuit/
documentation: https://huishenlab.github.io/biscuit/docs/alignment
tool_dev_url: https://github.com/huishenlab/biscuit
doi: ""
licence: ["MIT"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- reads:
type: file
description: |
List of input fastq files of size 1 and 2 for single-end and paired-end data,
respectively.
- index:
type: dir
description: Biscuit genome index directory (generated with 'biscuit index')
pattern: "BiscuitIndex"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- bam:
type: file
description: Output BAM file containing read alignments
pattern: "*.{bam}"
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
authors:
- "@njspix"

modules/biscuit/biscuitblaster/main.nf (new file)

@@ -0,0 +1,52 @@
process BISCUIT_BLASTER {
tag "$meta.id"
label 'process_high'
conda (params.enable_conda ? "bioconda::biscuit=1.0.2.20220113 bioconda::samblaster=0.1.26 bioconda::samtools=1.15" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/mulled-v2-db16f1c237a26ea9245cf9924f858974ff321d6e:17fa66297f088a1bc7560b7b90dc273bf23f2d8c-0':
'quay.io/biocontainers/mulled-v2-db16f1c237a26ea9245cf9924f858974ff321d6e:17fa66297f088a1bc7560b7b90dc273bf23f2d8c-0' }"
input:
tuple val(meta), path(reads)
path index
output:
tuple val(meta), path("*.bam"), emit: bam
tuple val(meta), path("*.bai"), emit: bai
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def prefix = task.ext.prefix ?: "${meta.id}"
def args = task.ext.args ?: ''
def args2 = task.ext.args2 ?: ''
def args3 = task.ext.args3 ?: ''
def biscuit_cpus = (int) Math.max(Math.floor(task.cpus*0.95),1)
def samtools_cpus = task.cpus-biscuit_cpus
"""
INDEX=`find -L ./ -name "*.bis.amb" | sed 's/.bis.amb//'`
biscuit align \\
-@ $biscuit_cpus \\
$args \\
\$INDEX \\
$reads | \\
samblaster \\
$args2 | \\
samtools sort \\
-@ $samtools_cpus \\
$args3 \\
--write-index \\
-o ${prefix}.bam##idx##${prefix}.bam.bai
cat <<-END_VERSIONS > versions.yml
"${task.process}":
biscuit: \$( biscuit version |& sed '1!d; s/^.*BISCUIT Version: //' )
samtools: \$( samtools --version |& sed '1!d; s/^.*samtools //' )
samblaster: \$( samblaster --version |& sed 's/^.*samblaster: Version //' )
END_VERSIONS
"""
}

modules/biscuit/biscuitblaster/meta.yml (new file)

@@ -0,0 +1,78 @@
name: biscuit_blaster
description: A fast, compact one-liner to produce duplicate-marked, sorted, and indexed BAM files using Biscuit
keywords:
- biscuit
- DNA methylation
- WGBS
- scWGBS
- bisulfite sequencing
- aligner
- bam
tools:
- biscuit:
description: A utility for analyzing sodium bisulfite conversion-based DNA methylation/modification data
homepage: https://huishenlab.github.io/biscuit/
documentation: https://huishenlab.github.io/biscuit/biscuitblaster/
tool_dev_url: https://github.com/huishenlab/biscuit
doi: ""
licence: ["MIT"]
- samblaster:
description: |
samblaster is a fast and flexible program for marking duplicates in read-id grouped paired-end SAM files.
It can also optionally output discordant read pairs and/or split read mappings to separate SAM files,
and/or unmapped/clipped reads to a separate FASTQ file.
By default, samblaster reads SAM input from stdin and writes SAM to stdout.
homepage: None
documentation: https://github.com/GregoryFaust/samblaster
tool_dev_url: https://github.com/GregoryFaust/samblaster
doi: "10.1093/bioinformatics/btu314"
licence: ["MIT"]
- samtools:
description: |
SAMtools is a set of utilities for interacting with and post-processing
short DNA sequence read alignments in the SAM, BAM and CRAM formats, written by Heng Li.
These files are generated as output by short read aligners like BWA.
homepage: http://www.htslib.org/
documentation: hhttp://www.htslib.org/doc/samtools.html
doi: 10.1093/bioinformatics/btp352
licence: ["MIT"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- reads:
type: file
description: |
List of input fastq files of size 1 and 2 for single-end and paired-end data,
respectively.
- index:
type: dir
description: Biscuit genome index directory (generated with 'biscuit index')
pattern: "BiscuitIndex"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- bam:
type: file
description: Output BAM file containing read alignments
pattern: "*.{bam}"
- bai:
type: file
description: Output BAM index
pattern: "*.{bai}"
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
authors:
- "@njspix"

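As the main.nf above shows, ext.args, ext.args2 and ext.args3 are forwarded to biscuit align, samblaster and samtools sort respectively. A hedged modules.config sketch; the samblaster and samtools flags are illustrative only:

```
// Hypothetical modules.config entry for the blaster one-liner; the three args slots map to the tools in pipe order.
process {
    withName: BISCUIT_BLASTER {
        ext.args  = ''               // extra biscuit align options
        ext.args2 = '--addMateTags'  // samblaster (illustrative)
        ext.args3 = '-m 2G'          // samtools sort memory per thread (illustrative)
    }
}
```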
modules/biscuit/bsconv/main.nf (new file)

@@ -0,0 +1,39 @@
process BISCUIT_BSCONV {
tag "$meta.id"
label 'process_long'
conda (params.enable_conda ? "bioconda::biscuit=1.0.2.20220113" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/biscuit:1.0.2.20220113--h81a5ba2_0':
'quay.io/biocontainers/biscuit:1.0.2.20220113--h81a5ba2_0' }"
input:
tuple val(meta), path(bam), path(bai)
path(index)
output:
tuple val(meta), path("*.bam"), emit: bsconv_bam
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
if ("$bam" == "${prefix}.bam") error "Input and output names are the same, set prefix in module configuration to disambiguate!"
"""
INDEX=`find -L ./ -name "*.bis.amb" | sed 's/.bis.amb//'`
biscuit bsconv \\
$args \\
\$INDEX \\
$bam \\
${prefix}.bam
cat <<-END_VERSIONS > versions.yml
"${task.process}":
biscuit: \$( biscuit version |& sed '1!d; s/^.*BISCUIT Version: //' )
END_VERSIONS
"""
}

modules/biscuit/bsconv/meta.yml (new file)

@@ -0,0 +1,55 @@
name: biscuit_bsconv
description: Summarize and/or filter reads based on bisulfite conversion rate
keywords:
- biscuit
- DNA methylation
- WGBS
- scWGBS
- bisulfite sequencing
- aligner
- bam
- filter
tools:
- biscuit:
description: A utility for analyzing sodium bisulfite conversion-based DNA methylation/modification data
homepage: https://huishenlab.github.io/biscuit/
documentation: https://huishenlab.github.io/biscuit/docs/subcommand_help.html#biscuit-bsconv
tool_dev_url: https://github.com/huishenlab/biscuit
doi: ""
licence: ["MIT"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- bam:
type: file
description: BAM file contained mapped reads
- bai:
type: file
description: BAM file index
- index:
type: dir
description: Biscuit genome index directory (generated with 'biscuit index')
pattern: "BiscuitIndex"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- bsconv_bam:
type: file
description: Output BAM file containing filtered read alignments
pattern: "*.{bam}"
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
authors:
- "@njspix"

modules/biscuit/epiread/main.nf (new file)

@@ -0,0 +1,57 @@
process BISCUIT_EPIREAD {
tag "$meta.id"
label 'process_long'
conda (params.enable_conda ? "bioconda::biscuit=1.0.2.20220113 bioconda::samtools=1.15" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/mulled-v2-db16f1c237a26ea9245cf9924f858974ff321d6e:17fa66297f088a1bc7560b7b90dc273bf23f2d8c-0':
'quay.io/biocontainers/mulled-v2-db16f1c237a26ea9245cf9924f858974ff321d6e:17fa66297f088a1bc7560b7b90dc273bf23f2d8c-0' }"
input:
tuple val(meta), path(bam), path(bai), path(snp_bed)
path(index)
output:
tuple val(meta), path("*.bed.gz"), emit: epiread_bed
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def args2 = task.ext.args2 ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def biscuit_cpus = (int) Math.max(Math.floor(task.cpus*0.9),1)
def samtools_cpus = task.cpus-biscuit_cpus
// As of 2/25/22, epiread does not support reading a gzipped SNP BED file.
// This is a bit hacky but allows the user to supply a gzipped OR uncompressed bed file
def unzip_snp_bed = snp_bed && (snp_bed.toString() =~ /\.gz$/) ? "bgzip -d ${snp_bed}" : ""
def unzipped_snp_bed = snp_bed ? snp_bed.toString() - ~/\.gz$/: ""
// SNP BED input is optional
def options_snp_bed = snp_bed ? "-B ${unzipped_snp_bed}" : ""
if ("$options_snp_bed" == "${prefix}.bed.gz") error "Input and output names are the same, set prefix in module configuration to disambiguate!"
"""
INDEX=`find -L ./ -name "*.bis.amb" | sed 's/.bis.amb//'`
$unzip_snp_bed
biscuit epiread \\
-@ $biscuit_cpus \\
$args \\
$options_snp_bed \\
\$INDEX \\
$bam | \\
LC_ALL=C sort -k1,1 -k2,2n | \\
bgzip \\
-@ $samtools_cpus \\
$args2 \\
-c > ${prefix}.bed.gz
cat <<-END_VERSIONS > versions.yml
"${task.process}":
biscuit: \$( biscuit version |& sed '1!d; s/^.*BISCUIT Version: //' )
samtools: \$( samtools --version |& sed '1!d; s/^.*samtools //' )
END_VERSIONS
"""
}

modules/biscuit/epiread/meta.yml (new file)

@@ -0,0 +1,58 @@
name: biscuit_epiread
description: |
Summarizes read-level methylation (and optionally SNV) information from a
Biscuit BAM file in a standard-compliant BED format.
keywords:
- biscuit
- DNA methylation
- WGBS
- scWGBS
- bisulfite sequencing
- aligner
- bam
tools:
- biscuit:
description: A utility for analyzing sodium bisulfite conversion-based DNA methylation/modification data
homepage: https://huishenlab.github.io/biscuit/
documentation: https://huishenlab.github.io/biscuit/epiread_format/
tool_dev_url: https://github.com/huishenlab/biscuit
doi: ""
licence: ["MIT"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- bam:
type: file
description: Biscuit BAM file
- bai:
type: file
description: BAM index
- snp_bed:
type: file
description: BED file containing SNP information (optional)
- index:
type: dir
description: Biscuit genome index directory (generated with 'biscuit index')
pattern: "BiscuitIndex"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- epiread_bed:
type: file
description: Gzipped BED file with methylation (and optionally SNV) information
pattern: "*.{epiread.bed.gz}"
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
authors:
- "@njspix"

modules/biscuit/index/main.nf (new file)

@@ -0,0 +1,33 @@
process BISCUIT_INDEX {
tag "$fasta"
label 'process_long'
conda (params.enable_conda ? "bioconda::biscuit=1.0.2.20220113" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/biscuit:1.0.2.20220113--h81a5ba2_0':
'quay.io/biocontainers/biscuit:1.0.2.20220113--h81a5ba2_0' }"
input:
path fasta, stageAs: "BiscuitIndex/*"
output:
path "BiscuitIndex/*.fa*", emit: index, includeInputs: true
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
"""
biscuit \\
index \\
$args \\
$fasta
cat <<-END_VERSIONS > versions.yml
"${task.process}":
biscuit: \$( biscuit version |& sed '1!d; s/^.*BISCUIT Version: //' )
END_VERSIONS
"""
}

modules/biscuit/index/meta.yml (new file)

@@ -0,0 +1,38 @@
name: biscuit_index
description: Indexes a reference genome for use with Biscuit
keywords:
- biscuit
- DNA methylation
- WGBS
- scWGBS
- bisulfite sequencing
- index
- reference
- fasta
tools:
- biscuit:
description: A utility for analyzing sodium bisulfite conversion-based DNA methylation/modification data
homepage: https://huishenlab.github.io/biscuit/
documentation: https://huishenlab.github.io/biscuit/docs/alignment
tool_dev_url: https://github.com/huishenlab/biscuit
doi: ""
licence: ["MIT"]
input:
- fasta:
type: file
description: Input genome fasta file
output:
- index:
type: dir
description: Biscuit genome index directory
pattern: "BiscuitIndex"
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
authors:
- "@njspix"

modules/biscuit/mergecg/main.nf (new file)

@@ -0,0 +1,43 @@
process BISCUIT_MERGECG {
tag "$meta.id"
label 'process_long'
conda (params.enable_conda ? "bioconda::biscuit=1.0.2.20220113 bioconda::samtools=1.15" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/mulled-v2-db16f1c237a26ea9245cf9924f858974ff321d6e:17fa66297f088a1bc7560b7b90dc273bf23f2d8c-0':
'quay.io/biocontainers/mulled-v2-db16f1c237a26ea9245cf9924f858974ff321d6e:17fa66297f088a1bc7560b7b90dc273bf23f2d8c-0' }"
input:
tuple val(meta), path(bed)
path index
output:
tuple val(meta), path("*.bed.gz"), emit: mergecg_bed
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def args2 = task.ext.args2 ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
INDEX=`find -L ./ -name "*.bis.amb" | sed 's/.bis.amb//'`
biscuit mergecg \\
$args \\
\$INDEX \\
$bed | \\
LC_ALL=C sort -k1,1 -k2,2n | \\
bgzip \\
$args2 \\
-c > ${prefix}.bed.gz
cat <<-END_VERSIONS > versions.yml
"${task.process}":
biscuit: \$( biscuit version |& sed '1!d; s/^.*BISCUIT Version: //' )
samtools: \$( samtools --version |& sed '1!d; s/^.*samtools //' )
END_VERSIONS
"""
}

modules/biscuit/mergecg/meta.yml (new file)

@@ -0,0 +1,51 @@
name: biscuit_mergecg
description: Merges methylation information for opposite-strand C's in a CpG context
keywords:
- biscuit
- DNA methylation
- WGBS
- scWGBS
- bisulfite sequencing
- aligner
- bed
tools:
- biscuit:
description: A utility for analyzing sodium bisulfite conversion-based DNA methylation/modification data
homepage: https://huishenlab.github.io/biscuit/
documentation: https://huishenlab.github.io/biscuit/docs/methylextraction.html
tool_dev_url: https://github.com/huishenlab/biscuit
doi: ""
licence: ["MIT"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- bed:
type: file
description: |
Biscuit BED file (output of biscuit vcf2bed)
- index:
type: dir
description: Biscuit genome index directory (generated with 'biscuit index')
pattern: "BiscuitIndex"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- mergecg_bed:
type: file
description: Gzipped BED file with merged methylation information
pattern: "*.bed.gz"
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
authors:
- "@njspix"

modules/biscuit/pileup/main.nf (new file)

@@ -0,0 +1,45 @@
process BISCUIT_PILEUP {
tag "$meta.id"
label 'process_high'
conda (params.enable_conda ? "bioconda::biscuit=1.0.2.20220113 bioconda::samtools=1.15" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/mulled-v2-db16f1c237a26ea9245cf9924f858974ff321d6e:17fa66297f088a1bc7560b7b90dc273bf23f2d8c-0':
'quay.io/biocontainers/mulled-v2-db16f1c237a26ea9245cf9924f858974ff321d6e:17fa66297f088a1bc7560b7b90dc273bf23f2d8c-0' }"
input:
tuple val(meta), path(normal_bams), path(normal_bais), path(tumor_bam), path(tumor_bai)
path index
output:
tuple val(meta), path("*.vcf.gz"), emit: vcf
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def args2 = task.ext.args2 ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def biscuit_cpus = (int) Math.max(Math.floor(task.cpus*0.9),1)
def bgzip_cpus = task.cpus-biscuit_cpus
if ( tumor_bam != [] && normal_bams.toList().size() > 1 ) error "[BISCUIT_PILEUP] error: Tumor BAM provided with more than one normal BAM"
if ( tumor_bam.toList().size() > 1 ) error "[BISCUIT_PILEUP] error: more than one tumor BAM provided"
input = ( tumor_bam==[] ) ? "${normal_bams}" : "-S -T ${tumor_bam} -I ${normal_bams}"
"""
INDEX=`find -L ./ -name "*.bis.amb" | sed 's/.bis.amb//'`
biscuit pileup \\
-@ $biscuit_cpus \\
$args \\
\$INDEX \\
$input \\
| bgzip -@ $bgzip_cpus $args2 > ${prefix}.vcf.gz
cat <<-END_VERSIONS > versions.yml
"${task.process}":
biscuit: \$( biscuit version |& sed '1!d; s/^.*BISCUIT Version: //' )
END_VERSIONS
"""
}

modules/biscuit/pileup/meta.yml (new file)

@@ -0,0 +1,70 @@
name: biscuit_pileup
description: Computes cytosine methylation and callable SNV mutations, optionally in reference to a germline BAM to call somatic variants
keywords:
- bisulfite
- DNA methylation
- pileup
- variant calling
- WGBS
- scWGBS
- bam
- vcf
tools:
- biscuit:
description: A utility for analyzing sodium bisulfite conversion-based DNA methylation/modification data
homepage: https://huishenlab.github.io/biscuit/
documentation: https://huishenlab.github.io/biscuit/docs/pileup.html
tool_dev_url: https://github.com/huishenlab/biscuit
doi: ""
licence: ["MIT"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- normal_bams:
type: file(s)
description: |
BAM files to be analyzed. If no tumor_bam file is provided, any number of "normal" BAMs may be provided
("normal" here is just a semantic issue, these BAMs could be from tumor or any other kind of tissue). If a
tumor BAM file is provided, exactly one normal (germline) BAM must be provided.
pattern: "*.{bam}"
- normal_bais:
type: file(s)
description: BAM index file or files corresponding to the provided normal_bams
pattern: "*.{bai}"
- tumor_bam:
type: file(s)
description: |
Optional. If a tumor BAM file is provided, pileup will run in "somatic" mode and will annotate variants with
their somatic state (present in tumor only, present in normal only, present in both, etc). Note that if a
tumor BAM file is provided, exactly one normal BAM must be provided.
pattern: "*.{bam}"
- tumor_bai:
type: file(s)
description: Optional. BAM index file corresponding to provided tumor_bam
pattern: "*.{bai}"
- index:
type: dir
description: Biscuit genome index directory (generated with 'biscuit index')
pattern: "BiscuitIndex"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
- vcf:
type: file
description: vcf file with methylation information
pattern: "*.{vcf.gz}"
authors:
- "@njspix"

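The tumor/normal rules described above all arrive through the module's single input tuple (meta, normal_bams, normal_bais, tumor_bam, tumor_bai) plus the index channel. A sketch of the two calling modes, with purely illustrative file names:

```
// Illustrative only: germline-only versus tumor/normal invocation of BISCUIT_PILEUP.
germline_in = [ [ id:'test' ], [ file('s1.bam'), file('s2.bam') ], [ file('s1.bam.bai'), file('s2.bam.bai') ], [], [] ]
somatic_in  = [ [ id:'test' ], [ file('normal.bam') ], [ file('normal.bam.bai') ], file('tumor.bam'), file('tumor.bam.bai') ]

BISCUIT_PILEUP ( somatic_in, BISCUIT_INDEX.out.index )
```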
modules/biscuit/qc/main.nf (new file)

@@ -0,0 +1,40 @@
process BISCUIT_QC {
tag "$meta.id"
label 'process_long'
conda (params.enable_conda ? "bioconda::biscuit=1.0.2.20220113" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/biscuit:1.0.2.20220113--h81a5ba2_0':
'quay.io/biocontainers/biscuit:1.0.2.20220113--h81a5ba2_0' }"
input:
tuple val(meta), path(bam)
path(index)
output:
tuple val(meta), path("*.txt"), emit: biscuit_qc_reports
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def se = meta.single_end ? "-s" : ""
"""
INDEX=`find -L ./ -name "*.bis.amb" | sed 's/.bis.amb//'`
biscuit qc \\
$args \\
$se \\
\$INDEX \\
$bam \\
$prefix
cat <<-END_VERSIONS > versions.yml
"${task.process}":
biscuit: \$( biscuit version |& sed '1!d; s/^.*BISCUIT Version: //' )
END_VERSIONS
"""
}

modules/biscuit/qc/meta.yml (new file)

@@ -0,0 +1,51 @@
name: biscuit_qc
description: Perform basic quality control on a BAM file generated with Biscuit
keywords:
- biscuit
- DNA methylation
- WGBS
- scWGBS
- bisulfite sequencing
- index
- BAM
- quality control
tools:
- biscuit:
description: A utility for analyzing sodium bisulfite conversion-based DNA methylation/modification data
homepage: https://huishenlab.github.io/biscuit/
documentation: https://huishenlab.github.io/biscuit/docs/subcommand_help.html#biscuit-qc
tool_dev_url: https://github.com/huishenlab/biscuit
doi: ""
licence: ["MIT"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- bam:
type: file
description: BAM file produced using Biscuit
output:
- biscuit_qc_reports:
type: file
description: |
Summary files containing the following information:
- CpG retention by position in read
- CpH retention by position in read
- Read duplication statistics
- Insert size distribution
- Distribution of mapping qualities
- Proportion of reads mapping to each strand
- Read-averaged cytosine conversion rate for CpA, CpC, CpG, and CpT
pattern: "*.txt"
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
authors:
- "@njspix"

modules/biscuit/vcf2bed/main.nf (new file)

@@ -0,0 +1,39 @@
process BISCUIT_VCF2BED {
tag "$meta.id"
label 'process_long'
conda (params.enable_conda ? "bioconda::biscuit=1.0.2.20220113 bioconda::samtools=1.15" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/mulled-v2-db16f1c237a26ea9245cf9924f858974ff321d6e:17fa66297f088a1bc7560b7b90dc273bf23f2d8c-0':
'quay.io/biocontainers/mulled-v2-db16f1c237a26ea9245cf9924f858974ff321d6e:17fa66297f088a1bc7560b7b90dc273bf23f2d8c-0' }"
input:
tuple val(meta), path(vcf)
output:
tuple val(meta), path("*.bed.gz"), emit: bed
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def args2 = task.ext.args2 ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
"""
biscuit vcf2bed \\
$args \\
$vcf | \\
LC_ALL=C sort -k1,1 -k2,2n | \\
bgzip \\
$args2 \\
-c > ${prefix}.bed.gz
cat <<-END_VERSIONS > versions.yml
"${task.process}":
biscuit: \$(echo \$(biscuit version 2>&1) | sed 's/^.*BISCUIT Version: //; s/Using.*\$//')
samtools: \$( samtools --version |& sed '1!d; s/^.*samtools //' )
END_VERSIONS
"""
}

modules/biscuit/vcf2bed/meta.yml (new file)

@@ -0,0 +1,48 @@
name: biscuit_vcf2bed
description: |
Summarizes methylation or SNV information from a Biscuit VCF in a
standard-compliant BED file.
keywords:
- biscuit
- DNA methylation
- WGBS
- scWGBS
- bisulfite sequencing
- aligner
- vcf
tools:
- biscuit:
description: A utility for analyzing sodium bisulfite conversion-based DNA methylation/modification data
homepage: https://huishenlab.github.io/biscuit/
documentation: https://huishenlab.github.io/biscuit/docs/methylextraction.html
tool_dev_url: https://github.com/huishenlab/biscuit
doi: ""
licence: ["MIT"]
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- vcf:
type: file
description: Biscuit vcf file (output of biscuit pileup)
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- bed:
type: file
description: Gzipped BED file with methylation or SNV information
pattern: "*.{bed.gz}"
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
authors:
- "@njspix"

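The input descriptions above ("output of biscuit pileup", and for mergecg "output of biscuit vcf2bed") imply a simple chain. An illustrative wiring of the three modules, with channel contents as declared in their inputs:

```
// Illustrative chaining of the methylation-extraction modules added in this commit.
BISCUIT_PILEUP  ( pileup_in, BISCUIT_INDEX.out.index )
BISCUIT_VCF2BED ( BISCUIT_PILEUP.out.vcf )
BISCUIT_MERGECG ( BISCUIT_VCF2BED.out.bed, BISCUIT_INDEX.out.index )
```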
modules/cnvpytor/callcnvs/main.nf (new file)

@@ -0,0 +1,33 @@
process CNVPYTOR_CALLCNVS {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::cnvpytor=1.0" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/cnvpytor:A1.0--py39h6a678da_2':
'quay.io/biocontainers/cnvpytor:1.0--py39h6a678da_2' }"
input:
tuple val(meta), path(pytor)
output:
tuple val(meta), path("*.tsv"), emit: cnvs
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: '1000'
def prefix = task.ext.prefix ?: "${meta.id}"
"""
cnvpytor \\
-root $pytor \\
-call $args > ${prefix}.tsv
cat <<-END_VERSIONS > versions.yml
"${task.process}":
cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
END_VERSIONS
"""
}

modules/cnvpytor/callcnvs/meta.yml (new file)

@@ -0,0 +1,40 @@
name: cnvpytor_callcnvs
description: command line tool for calling CNVs in whole genome sequencing data
- CNV calling
tools:
- cnvpytor:
description: calling CNVs using read depth
homepage: https://github.com/abyzovlab/CNVpytor
documentation: https://github.com/abyzovlab/CNVpytor
tool_dev_url: https://github.com/abyzovlab/CNVpytor
doi: "10.1101/2021.01.27.428472v1"
licence: ['MIT']
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test']
- pytor:
type: file
description: cnvpytor root file
pattern: "*.{pytor}"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test' ]
- cnvs:
type: file
description: file containing identified copy numer variations
pattern: "*.{tsv}"
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
authors:
- "@sima-r"

modules/cnvpytor/histogram/main.nf (new file)

@@ -0,0 +1,32 @@
process CNVPYTOR_HISTOGRAM {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::cnvpytor=1.0" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/cnvpytor:A1.0--py39h6a678da_2':
'quay.io/biocontainers/cnvpytor:1.0--py39h6a678da_2' }"
input:
tuple val(meta), path(pytor)
output:
tuple val(meta), path("${pytor.baseName}.pytor") , emit: pytor
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: '1000'
"""
cnvpytor \\
-root $pytor \\
-his $args
cat <<-END_VERSIONS > versions.yml
"${task.process}":
cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
END_VERSIONS
"""
}

modules/cnvpytor/histogram/meta.yml (new file)

@@ -0,0 +1,42 @@
name: cnvpytor_histogram
description: calculates read depth histograms
keywords:
- cnv calling
- histogram
tools:
- cnvpytor:
description: calling CNVs using read depth
homepage: https://github.com/abyzovlab/CNVpytor
documentation: https://github.com/abyzovlab/CNVpytor
tool_dev_url: https://github.com/abyzovlab/CNVpytor
doi: "10.1101/2021.01.27.428472v1"
licence: ['MIT']
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test' ]
- pytor:
type: file
description: pytor file containing read depth data
pattern: "*.{pytor}"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test' ]
- pytor:
type: file
description: pytor file containing read depth histograms binned based on given bin size(s)
pattern: "*.{pytor}"
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
authors:
- "@sima-r"

modules/cnvpytor/importreaddepth/main.nf (new file)

@@ -0,0 +1,38 @@
process CNVPYTOR_IMPORTREADDEPTH {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::cnvpytor=1.0" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/cnvpytor:A1.0--py39h6a678da_2':
'quay.io/biocontainers/cnvpytor:1.0--py39h6a678da_2' }"
input:
tuple val(meta), path(input_file), path(index)
path fasta
path fai
output:
tuple val(meta), path("*.pytor") , emit: pytor
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: ''
def prefix = task.ext.prefix ?: "${meta.id}"
def reference = fasta ? "-T ${fasta}" : ''
"""
cnvpytor \\
-root ${prefix}.pytor \\
-rd $input_file \\
$args \\
$reference
cat <<-END_VERSIONS > versions.yml
"${task.process}":
cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
END_VERSIONS
"""
}

modules/cnvpytor/importreaddepth/meta.yml (new file)

@@ -0,0 +1,55 @@
name: cnvpytor_importreaddepth
description: command line tool for CNV/CNA analysis. This step imports the read depth data into a root pytor file.
keywords:
- read depth
- cnv calling
tools:
- cnvpytor -rd:
description: calling CNVs using read depth
homepage: https://github.com/abyzovlab/CNVpytor
documentation: https://github.com/abyzovlab/CNVpytor
tool_dev_url: https://github.com/abyzovlab/CNVpytor
doi: "10.1101/2021.01.27.428472v1"
licence: ['MIT']
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test' ]
- input_file:
type: file
description: BAM/CRAM/SAM file
pattern: "*.{bam,cram}"
- index:
type: file
description: bam file index
pattern: "*.{bai,crai}"
- fasta:
type: file
description: specifies reference genome file (only for cram file without reference genome)
pattern: "*.{fasta,fasta.gz,fa,fa.gz}"
- fai:
type: file
description: Index of reference fasta file
pattern: "*.fai"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test' ]
- pytor:
type: file
description: read depth root file in which read depth data binned to 100 base pair bins will be stored.
pattern: "*.{pytor}"
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
authors:
- "@sima-r"

modules/cnvpytor/partition/main.nf (new file)

@@ -0,0 +1,32 @@
process CNVPYTOR_PARTITION {
tag "$meta.id"
label 'process_medium'
conda (params.enable_conda ? "bioconda::cnvpytor=1.0" : null)
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/cnvpytor:A1.0--py39h6a678da_2':
'quay.io/biocontainers/cnvpytor:1.0--py39h6a678da_2' }"
input:
tuple val(meta), path(pytor)
output:
tuple val(meta), path("${pytor.baseName}.pytor"), emit: pytor
path "versions.yml" , emit: versions
when:
task.ext.when == null || task.ext.when
script:
def args = task.ext.args ?: '1000'
"""
cnvpytor \\
-root $pytor \\
-partition $args
cat <<-END_VERSIONS > versions.yml
"${task.process}":
cnvpytor: \$(echo \$(cnvpytor --version 2>&1) | sed 's/^.*pyCNVnator //; s/Using.*\$//' ))
END_VERSIONS
"""
}

modules/cnvpytor/partition/meta.yml (new file)

@@ -0,0 +1,42 @@
name: cnvpytor_partition
description: partitioning read depth histograms
keywords:
- cnv calling
- partition histograms
tools:
- cnvpytor:
description: calling CNVs using read depth
homepage: https://github.com/abyzovlab/CNVpytor
documentation: https://github.com/abyzovlab/CNVpytor
tool_dev_url: https://github.com/abyzovlab/CNVpytor
doi: "10.1101/2021.01.27.428472v1"
licence: ['MIT']
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test' ]
- pytor:
type: file
description: pytor file containing read depth data
pattern: "*.{pytor}"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test' ]
- partitions:
type: file
description: pytor file containing partitions of read depth histograms using mean-shift method
pattern: "*.{pytor}"
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
authors:
- "@sima-r"

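Taken together, the four CNVpytor modules above implement the read-depth workflow their meta.yml files describe: import read depth, build histograms, partition, then call CNVs. An illustrative wiring with hypothetical input channels:

```
// Illustrative only: chaining the CNVpytor modules added in this commit.
CNVPYTOR_IMPORTREADDEPTH ( [ [ id:'test' ], bam, bai ], fasta, fai )
CNVPYTOR_HISTOGRAM       ( CNVPYTOR_IMPORTREADDEPTH.out.pytor )
CNVPYTOR_PARTITION       ( CNVPYTOR_HISTOGRAM.out.pytor )
CNVPYTOR_CALLCNVS        ( CNVPYTOR_PARTITION.out.pytor )
```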
modules/controlfreec/meta.yml

@@ -20,50 +20,50 @@ input:
      type: map
      description: |
        Groovy Map containing tool parameters. MUST follow the structure/keywords below and be provided via modules.config.
-        <optional> parameters can be removed from the map, if they are not set. All value must be surrounded by quotes, meta map parameters can be set with, i.e. sex = meta.sex:
+        Parameters marked as (optional) can be removed from the map, if they are not set. All values must be surrounded by quotes, meta map parameters can be set with, i.e. `sex = meta.sex`:
        For default values, please check the documentation above.

        ```
        {
          [
            "general" :[
-              "bedgraphoutput": <optional>,
-              "breakpointthreshold": <optional>,
-              "breakpointtype": <optional>,
-              "coefficientofvariation": <optional>,
-              "contamination": <optional>,
-              "contaminationadjustment": <optional>,
-              "degree": <optional>,
-              "forcegccontentnormalization": <optional>,
-              "gccontentprofile": <optional>,
-              "intercept": <optional>,
-              "mincnalength": <optional>,
-              "minmappabilityperwindow": <optional>,
-              "minexpectedgc": <optional>,
-              "maxexpectedgc": <optional>,
-              "minimalsubclonepresence": <optional>,
-              "noisydata": <optional>,
-              "ploidy": <optional>,
-              "printNA": <optional>,
-              "readcountthreshold": <optional >,
-              "sex": <optional>,
-              "step": <optional value>,
-              "telocentromeric": <optional>,
-              "uniquematch": <optional>,
-              "window": <optional>
+              "bedgraphoutput": (optional),
+              "breakpointthreshold": (optional),
+              "breakpointtype": (optional),
+              "coefficientofvariation": (optional),
+              "contamination": (optional),
+              "contaminationadjustment": (optional),
+              "degree": (optional),
+              "forcegccontentnormalization": (optional),
+              "gccontentprofile": (optional),
+              "intercept": (optional),
+              "mincnalength": (optional),
+              "minmappabilityperwindow": (optional),
+              "minexpectedgc": (optional),
+              "maxexpectedgc": (optional),
+              "minimalsubclonepresence": (optional),
+              "noisydata": (optional),
+              "ploidy": (optional),
+              "printNA": (optional),
+              "readcountthreshold": (optional),
+              "sex": (optional),
+              "step": (optional),
+              "telocentromeric": (optional),
+              "uniquematch": (optional),
+              "window": (optional)
            ],
            "control":[
-              "inputformat": <required>,
-              "mateorientation": <optional>,
+              "inputformat": (required),
+              "mateorientation": (optional),
            ],
            "sample":[
-              "inputformat": <required>,
-              "mateorientation": <optional>,
+              "inputformat": (required),
+              "mateorientation": (optional),
            ],
            "BAF":[
-              "minimalcoverageperposition": <optional>,
-              "minimalqualityperposition": <optional>,
-              "shiftinquality": <optional>
+              "minimalcoverageperposition": (optional),
+              "minimalqualityperposition": (optional),
+              "shiftinquality": (optional)
            ]
          ]
        }

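As with the ASCAT entry earlier in this commit, the map described above is expected to reach the module as task.ext.args via modules.config. A hedged sketch; the process selector and the values are illustrative, and the Control-FREEC module itself is not shown in this diff:

```
// Hypothetical modules.config entry following the map structure documented above.
process {
    withName: CONTROLFREEC {
        ext.args = [
            "general" : [ "ploidy": "2", "window": "50000" ],
            "control" : [ "inputformat": "pileup", "mateorientation": "FR" ],
            "sample"  : [ "inputformat": "pileup", "mateorientation": "FR" ]
        ]
    }
}
```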
modules/deeparg/downloaddata/main.nf

@@ -7,6 +7,12 @@ process DEEPARG_DOWNLOADDATA {
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity/deeparg:1.0.2--pyhdfd78af_1' :
        'quay.io/biocontainers/deeparg:1.0.2--pyhdfd78af_1' }"
+    /*
+    We have to force singularity to run with --fakeroot to allow reading of a problematic file with borked read-write permissions in an upstream dependency (theanos).
+    This flag may not be available on all systems and may be considered a security problem. so please document and /or warn for this in your pipeline!
+    */
+    containerOptions { "${workflow.containerEngine}" == 'singularity' ? '--fakeroot' : '' }
+
     input:

modules/deeparg/predict/main.nf

@@ -8,6 +8,11 @@ process DEEPARG_PREDICT {
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
        'https://depot.galaxyproject.org/singularity//deeparg:1.0.2--pyhdfd78af_1' :
        'quay.io/biocontainers/deeparg:1.0.2--pyhdfd78af_1' }"
+    /*
+    We have to force singularity to run with --fakeroot to allow reading of a problematic file with borked read-write permissions in an upstream dependency (theanos).
+    This flag may not be available on all systems and may be considered a security problem. so please document and /or warn for this in your pipeline!
+    */
+    containerOptions { "${workflow.containerEngine}" == 'singularity' ? '--fakeroot' : '' }

     input:
     tuple val(meta), path(fasta), val(model)

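Because the --fakeroot workaround explained in the comments above may be unavailable or disallowed on some systems, a pipeline can override the directive from its own configuration. A hedged sketch; the selector names are taken from the two processes in this commit, everything else is illustrative:

```
// Hypothetical pipeline-side override: clear the module's --fakeroot containerOptions where the
// local Singularity installation does not permit it, and document the consequences for users.
process {
    withName: 'DEEPARG_DOWNLOADDATA|DEEPARG_PREDICT' {
        containerOptions = ''
    }
}
```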
modules/optitype/main.nf

@@ -8,7 +8,7 @@ process OPTITYPE {
        'quay.io/biocontainers/optitype:1.3.5--0' }"

     input:
-    tuple val(meta), path(bam)
+    tuple val(meta), path(bam), path(bai)

     output:
     tuple val(meta), path("${prefix}"), emit: output

modules/optitype/meta.yml

@@ -22,6 +22,10 @@ input:
      type: file
      description: BAM file
      pattern: "*.{bam}"
+  - bai:
+      type: file
+      description: BAM index file
+      pattern: "*.{bai}"
 output:
   - meta:

tests/config/nextflow.config

@@ -28,5 +28,5 @@ conda { createTimeout = "120 min" }
 includeConfig 'test_data.config'

 manifest {
-    nextflowVersion = '!>=21.10.3'
+    nextflowVersion = '!>=21.10.0'
 }

tests/config/pytest_modules.yml

@@ -46,6 +46,10 @@ artic/minion:
   - modules/artic/minion/**
   - tests/modules/artic/minion/**

+ascat:
+  - modules/ascat/**
+  - tests/modules/ascat/**
+
 assemblyscan:
   - modules/assemblyscan/**
   - tests/modules/assemblyscan/**

@@ -198,6 +202,49 @@ bedtools/subtract:
   - modules/bedtools/subtract/**
   - tests/modules/bedtools/subtract/**

+biscuit/align:
+  - modules/biscuit/index/**
+  - modules/biscuit/align/**
+  - tests/modules/biscuit/align/**
+
+biscuit/biscuitblaster:
+  - modules/biscuit/index/**
+  - modules/biscuit/biscuitblaster/**
+  - tests/modules/biscuit/biscuitblaster/**
+
+biscuit/bsconv:
+  - modules/biscuit/index/**
+  - modules/biscuit/bsconv/**
+  - tests/modules/biscuit/bsconv/**
+
+biscuit/epiread:
+  - modules/biscuit/index/**
+  - modules/biscuit/epiread/**
+  - tests/modules/biscuit/epiread/**
+
+biscuit/index:
+  - modules/biscuit/index/**
+  - tests/modules/biscuit/index/**
+
+biscuit/mergecg:
+  - modules/biscuit/index/**
+  - modules/biscuit/mergecg/**
+  - tests/modules/biscuit/mergecg/**
+
+biscuit/pileup:
+  - modules/biscuit/index/**
+  - modules/biscuit/pileup/**
+  - tests/modules/biscuit/pileup/**
+
+biscuit/qc:
+  - modules/biscuit/index/**
+  - modules/biscuit/qc/**
+  - tests/modules/biscuit/qc/**
+
+biscuit/vcf2bed:
+  - modules/biscuit/vcf2bed/**
+  - tests/modules/biscuit/vcf2bed/**
+
 biobambam/bammarkduplicates2:
   - modules/biobambam/bammarkduplicates2/**
   - tests/modules/biobambam/bammarkduplicates2/**

tests/config/test_data.config

@@ -192,7 +192,8 @@ params {
            test_paired_end_umi_unsorted_bam        = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/umi/test.paired_end.umi_unsorted.bam"
            test_paired_end_umi_unsorted_tagged_bam = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/umi/test.paired_end.unsorted_tagged.bam"
            test_paired_end_hla                     = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/example_hla_pe.bam"
+            test_paired_end_hla_sorted_bam          = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/example_hla_pe.sorted.bam"
+            test_paired_end_hla_sorted_bam_bai      = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/example_hla_pe.sorted.bam.bai"
            test2_paired_end_sorted_bam             = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/test2.paired_end.sorted.bam"
            test2_paired_end_sorted_bam_bai         = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/test2.paired_end.sorted.bam.bai"
            test2_paired_end_name_sorted_bam        = "${test_data_dir}/genomics/homo_sapiens/illumina/bam/test2.paired_end.name.sorted.bam"

@@ -295,6 +296,8 @@ params {
            test_rnaseq_vcf = "${test_data_dir}/genomics/homo_sapiens/illumina/vcf/test.rnaseq.vcf"
            test_sv_vcf     = "${test_data_dir}/genomics/homo_sapiens/illumina/vcf/sv_query.vcf.gz"

+            test_pytor      = "${test_data_dir}/genomics/homo_sapiens/illumina/pytor/test.pytor"
        }
        'pacbio' {
            primers         = "${test_data_dir}/genomics/homo_sapiens/pacbio/fasta/primers.fasta"

tests/modules/adapterremoval/main.nf

@@ -3,13 +3,15 @@
 nextflow.enable.dsl = 2

 include { ADAPTERREMOVAL } from '../../../modules/adapterremoval/main.nf'
+include { ADAPTERREMOVAL as ADAPTERREMOVAL_COLLAPSE } from '../../../modules/adapterremoval/main.nf'

 workflow test_adapterremoval_single_end {
     input = [ [ id:'test', single_end:true, collapse:false ], // meta map
              file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true)
            ]

-    ADAPTERREMOVAL ( input )
+    ADAPTERREMOVAL ( input, [] )
 }

 workflow test_adapterremoval_paired_end {

@@ -18,15 +20,15 @@ workflow test_adapterremoval_paired_end {
              file(params.test_data['sarscov2']['illumina']['test_2_fastq_gz'], checkIfExists: true) ]
            ]

-    ADAPTERREMOVAL ( input )
+    ADAPTERREMOVAL ( input, [] )
 }

 workflow test_adapterremoval_paired_end_collapse {
-    input = [ [ id:'test', single_end:false, collapse:true ], // meta map
+    input = [ [ id:'test', single_end:false ], // meta map
              [ file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true),
                file(params.test_data['sarscov2']['illumina']['test_2_fastq_gz'], checkIfExists: true) ]
            ]

-    ADAPTERREMOVAL ( input )
+    ADAPTERREMOVAL_COLLAPSE ( input, [] )
 }

tests/modules/adapterremoval/nextflow.config

@@ -2,4 +2,8 @@ process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
withName: ADAPTERREMOVAL_COLLAPSE {
ext.args = "--collapse"
}
}
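For context, read collapsing is now driven purely by ext.args on the aliased process rather than a collapse flag in the meta map. A minimal sketch of how a pipeline could scope the same option to a single aliased invocation of the module — the fully qualified workflow path in the selector is illustrative, not taken from this commit:

process {
    // hypothetical selector: only the aliased copy of the module receives --collapse
    withName: 'EXAMPLE_PIPELINE:PREPROCESS:ADAPTERREMOVAL_COLLAPSE' {
        ext.args = '--collapse'
    }
}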

View file

@@ -23,7 +23,6 @@
md5sum: 294a6277f0139bd597e57c6fa31f39c7
- path: output/adapterremoval/test.pair2.truncated.gz
md5sum: de7b38e2c881bced8671acb1ab452d78
- path: output/adapterremoval/test.singleton.truncated.gz
- path: output/adapterremoval/versions.yml
md5sum: fa621c887897da5a379c719399c17db7
@@ -32,13 +31,15 @@
tags:
- adapterremoval
files:
- path: output/adapterremoval/test.collapsed.gz
md5sum: ff956de3532599a56c3efe5369f0953f
- path: output/adapterremoval/test.collapsed.truncated.gz
- path: output/adapterremoval/test.discarded.gz
- path: output/adapterremoval/test.log
-md5sum: b8a451d3981b327f3fdb44f40ba2d6d1
+md5sum: 7f0b2328152226e46101a535cce718b3
- path: output/adapterremoval/test.pair1.truncated.gz
-md5sum: 294a6277f0139bd597e57c6fa31f39c7
+md5sum: 683be19bc1c83008944b6b719bfa34e1
- path: output/adapterremoval/test.pair2.truncated.gz
-md5sum: de7b38e2c881bced8671acb1ab452d78
+md5sum: e6548fe061f3ef86368b26da930174d0
- path: output/adapterremoval/test.singleton.truncated.gz
- path: output/adapterremoval/versions.yml
-md5sum: fd428f92a8446e0b34c5ae1c447215b8
+md5sum: 78f589bb313c8da0147ca8ce77d7f3bf

View file

@@ -0,0 +1,64 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { ASCAT as ASCAT_SIMPLE} from '../../../modules/ascat/main.nf'
include { ASCAT as ASCAT_PLOIDY_AND_PURITY} from '../../../modules/ascat/main.nf'
include { ASCAT as ASCAT_CRAM} from '../../../modules/ascat/main.nf'
workflow test_ascat {
input = [
[ id:'test', single_end:false ], // meta map
file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_bam'], checkIfExists: true),
file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_bam_bai'], checkIfExists: true),
file(params.test_data['homo_sapiens']['illumina']['test2_paired_end_sorted_bam'], checkIfExists: true),
file(params.test_data['homo_sapiens']['illumina']['test2_paired_end_sorted_bam_bai'], checkIfExists: true)
]
ASCAT_SIMPLE ( input , [], [])
}
// extended tests running with 1000 genomes data. Data is downloaded as follows:
// wget ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase1/data/HG00154/alignment/HG00154.mapped.ILLUMINA.bwa.GBR.low_coverage.20101123.bam
// wget ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase1/data/HG00154/alignment/HG00154.mapped.ILLUMINA.bwa.GBR.low_coverage.20101123.bam.bai
// wget http://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase1/data/HG00155/alignment/HG00155.mapped.ILLUMINA.bwa.GBR.low_coverage.20101123.bam
// wget http://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase1/data/HG00155/alignment/HG00155.mapped.ILLUMINA.bwa.GBR.low_coverage.20101123.bam.bai
//workflow test_ascat_with_ploidy_and_purity {
// input = [
// [ id:'test', single_end:false ], // meta map
// file("/home/ec2-user/input_files/bams/HG00154.mapped.ILLUMINA.bwa.GBR.low_coverage.20101123.bam", checkIfExists: true),
// file("/home/ec2-user/input_files/bams/HG00154.mapped.ILLUMINA.bwa.GBR.low_coverage.20101123.bam.bai", checkIfExists: true),
// file("/home/ec2-user/input_files/bams/test2.bam", checkIfExists: true),
// file("/home/ec2-user/input_files/bams/test2.bam.bai", checkIfExists: true)
// ]
//
// ASCAT_PLOIDY_AND_PURITY ( input , "/home/ec2-user/input_files/allele_files/G1000_alleles_hg19_chr", "/home/ec2-user/input_files/loci_files/G1000_alleles_hg19_chr")
//}
// extended tests running with 1000 genomes data. Data is downloaded as follows:
// wget ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase3/data/HG00145/alignment/HG00145.mapped.ILLUMINA.bwa.GBR.low_coverage.20120522.bam.cram.crai
// wget ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase3/data/HG00145/alignment/HG00145.mapped.ILLUMINA.bwa.GBR.low_coverage.20120522.bam.cram
// wget ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase3/data/HG00146/alignment/HG00146.mapped.ILLUMINA.bwa.GBR.low_coverage.20120522.bam.cram.crai
// wget ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase3/data/HG00146/alignment/HG00146.mapped.ILLUMINA.bwa.GBR.low_coverage.20120522.bam.cram
//workflow test_ascat_with_crams {
// input = [
// [ id:'test', single_end:false ], // meta map
// file("/home/ec2-user/input_files/crams/HG00145.mapped.ILLUMINA.bwa.GBR.low_coverage.20120522.bam.cram", checkIfExists: true),
// file("/home/ec2-user/input_files/crams/HG00145.mapped.ILLUMINA.bwa.GBR.low_coverage.20120522.bam.cram.crai", checkIfExists: true),
// file("/home/ec2-user/input_files/crams/duplicate_test.cram", checkIfExists: true),
// file("/home/ec2-user/input_files/crams/duplicate_test.cram.crai", checkIfExists: true)
// ]
//
// ASCAT_CRAM ( input , "/home/ec2-user/input_files/allele_files/G1000_alleles_hg19_chr", "/home/ec2-user/input_files/loci_files/G1000_alleles_hg19_chr")
//}

View file

@@ -0,0 +1,39 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
withName: ASCAT_SIMPLE {
ext.args = [
gender : 'XY',
genomeVersion : 'hg19',
minCounts : '1',
min_base_qual : '1',
min_map_qual : '1',
chrom_names : 'c("21","22")'
]
}
withName: ASCAT_PLOIDY_AND_PURITY {
ext.args = [
gender : 'XX',
genomeVersion : 'hg19',
ploidy : '1.7',
purity : '0.24',
chrom_names : 'c("21","22")'
]
}
withName: ASCAT_CRAM {
ext.args = [
gender : 'XX',
genomeVersion : 'hg19',
ref_fasta : '/home/ec2-user/input_files/fasta/human_g1k_v37.fasta',
chrom_names : 'c("21","22")'
]
}
}

View file

@@ -0,0 +1,25 @@
- name: ascat test_ascat
command: nextflow run tests/modules/ascat -entry test_ascat -c tests/config/nextflow.config -stub-run
tags:
- ascat
files:
- path: output/ascat/Tumour.ASCATprofile.png
md5sum: f50b84b1db4b83ba62ec1deacc69c260
- path: output/ascat/Tumour.ASPCF.png
md5sum: f50b84b1db4b83ba62ec1deacc69c260
- path: output/ascat/Tumour.germline.png
md5sum: f50b84b1db4b83ba62ec1deacc69c260
- path: output/ascat/Tumour.rawprofile.png
md5sum: f50b84b1db4b83ba62ec1deacc69c260
- path: output/ascat/Tumour.sunrise.png
md5sum: f50b84b1db4b83ba62ec1deacc69c260
- path: output/ascat/Tumour.tumour.png
md5sum: f50b84b1db4b83ba62ec1deacc69c260
- path: output/ascat/test.cnvs.txt
md5sum: f50b84b1db4b83ba62ec1deacc69c260
- path: output/ascat/test.purityploidy.txt
md5sum: f50b84b1db4b83ba62ec1deacc69c260
- path: output/ascat/test.segments.txt
md5sum: f50b84b1db4b83ba62ec1deacc69c260
- path: output/ascat/versions.yml
md5sum: 1af20694ec11004c4f8bc0c609b06386

View file

@@ -0,0 +1,33 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { BISCUIT_INDEX } from '../../../../modules/biscuit/index/main.nf'
include { BISCUIT_ALIGN as BISCUIT_ALIGN_SE } from '../../../../modules/biscuit/align/main.nf'
include { BISCUIT_ALIGN as BISCUIT_ALIGN_PE } from '../../../../modules/biscuit/align/main.nf'
// Single-end test
workflow test_biscuit_align_single {
input = [ [ id:'test' ], // meta map
[ file(params.test_data['sarscov2']['illumina']['test_methylated_1_fastq_gz'], checkIfExists: true) ]
]
fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
BISCUIT_INDEX ( fasta )
BISCUIT_ALIGN_SE (input, BISCUIT_INDEX.out.index )
}
// paired-end test
workflow test_biscuit_align_paired {
input = [ [ id:'test' ], // meta map
[ file(params.test_data['sarscov2']['illumina']['test_methylated_1_fastq_gz'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_methylated_2_fastq_gz'], checkIfExists: true) ]
]
fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
BISCUIT_INDEX ( fasta )
BISCUIT_ALIGN_PE (input, BISCUIT_INDEX.out.index )
}

View file

@@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}

View file

@@ -0,0 +1,53 @@
- name: biscuit align test_biscuit_align_single
command: nextflow run tests/modules/biscuit/align -entry test_biscuit_align_single -c tests/config/nextflow.config
tags:
- biscuit
- biscuit/align
files:
- path: output/biscuit/BiscuitIndex/genome.fasta
md5sum: 6e9fe4042a72f2345f644f239272b7e6
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.amb
md5sum: 3a68b8b2287e07dd3f5f95f4344ba76e
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.ann
md5sum: c32e11f6c859f166c7525a9c1d583567
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.pac
md5sum: 983e3d2cd6f36e2546e6d25a0da78d66
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.bwt
md5sum: a11bc31775f7b7a4f9cd3bc4f981661a
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.sa
md5sum: 9c9e07fa1c75ef32d764274579c89b08
- path: output/biscuit/BiscuitIndex/genome.fasta.par.bwt
md5sum: 62eb83cd557a47b59589713d98024fc2
- path: output/biscuit/BiscuitIndex/genome.fasta.par.sa
md5sum: 55bcd97d7059bf73dc0d221e36e8e901
- path: output/biscuit/test.bam
md5sum: eb36532425cb9b259410d6464a9e523a
- path: output/biscuit/versions.yml
md5sum: a86c4170bbf90cc75b93eb59ea124acd
- name: biscuit align test_biscuit_align_paired
command: nextflow run tests/modules/biscuit/align -entry test_biscuit_align_paired -c tests/config/nextflow.config
tags:
- biscuit
- biscuit/align
files:
- path: output/biscuit/BiscuitIndex/genome.fasta
md5sum: 6e9fe4042a72f2345f644f239272b7e6
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.amb
md5sum: 3a68b8b2287e07dd3f5f95f4344ba76e
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.ann
md5sum: c32e11f6c859f166c7525a9c1d583567
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.pac
md5sum: 983e3d2cd6f36e2546e6d25a0da78d66
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.bwt
md5sum: a11bc31775f7b7a4f9cd3bc4f981661a
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.sa
md5sum: 9c9e07fa1c75ef32d764274579c89b08
- path: output/biscuit/BiscuitIndex/genome.fasta.par.bwt
md5sum: 62eb83cd557a47b59589713d98024fc2
- path: output/biscuit/BiscuitIndex/genome.fasta.par.sa
md5sum: 55bcd97d7059bf73dc0d221e36e8e901
- path: output/biscuit/test.bam
md5sum: be3f6aa86c499d6a6b2996e5936e4f50
- path: output/biscuit/versions.yml
md5sum: f0b7dffd28f5e6bb1466fce6661d133f

View file

@@ -0,0 +1,32 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { BISCUIT_INDEX } from '../../../../modules/biscuit/index/main.nf'
include { BISCUIT_BLASTER as BISCUIT_BLASTER_SE } from '../../../../modules/biscuit/biscuitblaster/main.nf'
include { BISCUIT_BLASTER as BISCUIT_BLASTER_PE } from '../../../../modules/biscuit/biscuitblaster/main.nf'
// Single-end test
workflow test_biscuit_blaster_single {
input = [ [ id:'test' ], // meta map
[ file(params.test_data['sarscov2']['illumina']['test_methylated_1_fastq_gz'], checkIfExists: true) ]
]
fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
BISCUIT_INDEX ( fasta )
BISCUIT_BLASTER_SE (input, BISCUIT_INDEX.out.index )
}
// paired-end test
workflow test_biscuit_blaster_paired {
input = [ [ id:'test' ], // meta map
[ file(params.test_data['sarscov2']['illumina']['test_methylated_1_fastq_gz'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_methylated_2_fastq_gz'], checkIfExists: true) ]
]
fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
BISCUIT_INDEX ( fasta )
BISCUIT_BLASTER_PE (input, BISCUIT_INDEX.out.index )
}

View file

@@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}

View file

@@ -0,0 +1,57 @@
- name: biscuit biscuitblaster test_biscuit_blaster_single
command: nextflow run tests/modules/biscuit/biscuitblaster -entry test_biscuit_blaster_single -c tests/config/nextflow.config
tags:
- biscuit/biscuitblaster
- biscuit
files:
- path: output/biscuit/BiscuitIndex/genome.fasta
md5sum: 6e9fe4042a72f2345f644f239272b7e6
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.amb
md5sum: 3a68b8b2287e07dd3f5f95f4344ba76e
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.ann
md5sum: c32e11f6c859f166c7525a9c1d583567
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.pac
md5sum: 983e3d2cd6f36e2546e6d25a0da78d66
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.bwt
md5sum: a11bc31775f7b7a4f9cd3bc4f981661a
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.sa
md5sum: 9c9e07fa1c75ef32d764274579c89b08
- path: output/biscuit/BiscuitIndex/genome.fasta.par.bwt
md5sum: 62eb83cd557a47b59589713d98024fc2
- path: output/biscuit/BiscuitIndex/genome.fasta.par.sa
md5sum: 55bcd97d7059bf73dc0d221e36e8e901
- path: output/biscuit/test.bam
md5sum: 9ece50b67349382d38b20c2702e65675
- path: output/biscuit/test.bam.bai
md5sum: 8f14bb42fd38cc7ce4a3c3a9d7133ea4
- path: output/biscuit/versions.yml
md5sum: bfb660b5b0d92dde6817a1c6a2a302bb
- name: biscuit biscuitblaster test_biscuit_blaster_paired
command: nextflow run tests/modules/biscuit/biscuitblaster -entry test_biscuit_blaster_paired -c tests/config/nextflow.config
tags:
- biscuit/biscuitblaster
- biscuit
files:
- path: output/biscuit/BiscuitIndex/genome.fasta
md5sum: 6e9fe4042a72f2345f644f239272b7e6
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.amb
md5sum: 3a68b8b2287e07dd3f5f95f4344ba76e
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.ann
md5sum: c32e11f6c859f166c7525a9c1d583567
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.pac
md5sum: 983e3d2cd6f36e2546e6d25a0da78d66
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.bwt
md5sum: a11bc31775f7b7a4f9cd3bc4f981661a
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.sa
md5sum: 9c9e07fa1c75ef32d764274579c89b08
- path: output/biscuit/BiscuitIndex/genome.fasta.par.bwt
md5sum: 62eb83cd557a47b59589713d98024fc2
- path: output/biscuit/BiscuitIndex/genome.fasta.par.sa
md5sum: 55bcd97d7059bf73dc0d221e36e8e901
- path: output/biscuit/test.bam
md5sum: 0c6de35f38003df6ea5dd036170df91b
- path: output/biscuit/test.bam.bai
md5sum: 0d76977b2e36046cc176112776c5fa4e
- path: output/biscuit/versions.yml
md5sum: 82160a7ad29ccc3a21e59b1869399c04

View file

@@ -0,0 +1,19 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { BISCUIT_INDEX } from '../../../../modules/biscuit/index/main.nf'
include { BISCUIT_BSCONV } from '../../../../modules/biscuit/bsconv/main.nf'
workflow test_biscuit_bsconv {
input = [
[ id:'test', single_end:false ], // meta map
file(params.test_data['sarscov2']['illumina']['test_paired_end_methylated_sorted_bam'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_paired_end_methylated_sorted_bam_bai'], checkIfExists: true)
]
fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
BISCUIT_INDEX( fasta )
BISCUIT_BSCONV ( input, BISCUIT_INDEX.out.index )
}

View file

@@ -0,0 +1,10 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
withName: '.*BISCUIT_BSCONV' {
ext.args = '-f 0.1'
}
}

View file

@@ -0,0 +1,26 @@
- name: biscuit bsconv test_biscuit_bsconv
command: nextflow run tests/modules/biscuit/bsconv -entry test_biscuit_bsconv -c tests/config/nextflow.config
tags:
- biscuit
- biscuit/bsconv
files:
- path: output/biscuit/BiscuitIndex/genome.fasta
md5sum: 6e9fe4042a72f2345f644f239272b7e6
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.amb
md5sum: 3a68b8b2287e07dd3f5f95f4344ba76e
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.ann
md5sum: c32e11f6c859f166c7525a9c1d583567
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.pac
md5sum: 983e3d2cd6f36e2546e6d25a0da78d66
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.bwt
md5sum: a11bc31775f7b7a4f9cd3bc4f981661a
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.sa
md5sum: 9c9e07fa1c75ef32d764274579c89b08
- path: output/biscuit/BiscuitIndex/genome.fasta.par.bwt
md5sum: 62eb83cd557a47b59589713d98024fc2
- path: output/biscuit/BiscuitIndex/genome.fasta.par.sa
md5sum: 55bcd97d7059bf73dc0d221e36e8e901
- path: output/biscuit/test.bam
md5sum: e33e9498d00dd32222b90a6bd981226f
- path: output/biscuit/versions.yml
md5sum: 7deec1f096203542bbb72ac4fa05f9ba

View file

@@ -0,0 +1,48 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { BISCUIT_INDEX } from '../../../../modules/biscuit/index/main.nf'
include { BISCUIT_EPIREAD } from '../../../../modules/biscuit/epiread/main.nf'
workflow test_biscuit_epiread_nosnp {
input = [
[ id:'test', single_end:false ], // meta map
file(params.test_data['sarscov2']['illumina']['test_paired_end_methylated_sorted_bam'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_paired_end_methylated_sorted_bam_bai'], checkIfExists: true),
[] //SNP BED file
]
fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
BISCUIT_INDEX( fasta )
BISCUIT_EPIREAD ( input, BISCUIT_INDEX.out.index )
}
workflow test_biscuit_epiread_snp {
input = [
[ id:'test', single_end:false ], // meta map
file(params.test_data['sarscov2']['illumina']['test_paired_end_methylated_sorted_bam'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_paired_end_methylated_sorted_bam_bai'], checkIfExists: true),
file('https://github.com/nf-core/test-datasets/raw/modules/data/delete_me/biscuit/test-snp.bed')
]
fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
BISCUIT_INDEX( fasta )
BISCUIT_EPIREAD ( input, BISCUIT_INDEX.out.index )
}
workflow test_biscuit_epiread_snp_decompress {
input = [
[ id:'test', single_end:false ], // meta map
file(params.test_data['sarscov2']['illumina']['test_paired_end_methylated_sorted_bam'], checkIfExists: true),
file(params.test_data['sarscov2']['illumina']['test_paired_end_methylated_sorted_bam_bai'], checkIfExists: true),
file('https://github.com/nf-core/test-datasets/raw/modules/data/delete_me/biscuit/test-snp.bed.gz')
]
fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
BISCUIT_INDEX( fasta )
BISCUIT_EPIREAD ( input, BISCUIT_INDEX.out.index )
}

View file

@@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}

View file

@@ -0,0 +1,80 @@
- name: biscuit epiread test_biscuit_epiread_nosnp
command: nextflow run tests/modules/biscuit/epiread -entry test_biscuit_epiread_nosnp -c tests/config/nextflow.config
tags:
- biscuit
- biscuit/epiread
files:
- path: output/biscuit/BiscuitIndex/genome.fasta
md5sum: 6e9fe4042a72f2345f644f239272b7e6
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.amb
md5sum: 3a68b8b2287e07dd3f5f95f4344ba76e
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.ann
md5sum: c32e11f6c859f166c7525a9c1d583567
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.pac
md5sum: 983e3d2cd6f36e2546e6d25a0da78d66
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.bwt
md5sum: a11bc31775f7b7a4f9cd3bc4f981661a
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.sa
md5sum: 9c9e07fa1c75ef32d764274579c89b08
- path: output/biscuit/BiscuitIndex/genome.fasta.par.bwt
md5sum: 62eb83cd557a47b59589713d98024fc2
- path: output/biscuit/BiscuitIndex/genome.fasta.par.sa
md5sum: 55bcd97d7059bf73dc0d221e36e8e901
- path: output/biscuit/test.bed.gz
md5sum: dbb30b59f4ef6fdfdee38630225c0574
- path: output/biscuit/versions.yml
md5sum: 674a77ac5ca8f4b42d30e58e30c3a9af
- name: biscuit epiread test_biscuit_epiread_snp
command: nextflow run tests/modules/biscuit/epiread -entry test_biscuit_epiread_snp -c tests/config/nextflow.config
tags:
- biscuit
- biscuit/epiread
files:
- path: output/biscuit/BiscuitIndex/genome.fasta
md5sum: 6e9fe4042a72f2345f644f239272b7e6
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.amb
md5sum: 3a68b8b2287e07dd3f5f95f4344ba76e
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.ann
md5sum: c32e11f6c859f166c7525a9c1d583567
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.pac
md5sum: 983e3d2cd6f36e2546e6d25a0da78d66
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.bwt
md5sum: a11bc31775f7b7a4f9cd3bc4f981661a
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.sa
md5sum: 9c9e07fa1c75ef32d764274579c89b08
- path: output/biscuit/BiscuitIndex/genome.fasta.par.bwt
md5sum: 62eb83cd557a47b59589713d98024fc2
- path: output/biscuit/BiscuitIndex/genome.fasta.par.sa
md5sum: 55bcd97d7059bf73dc0d221e36e8e901
- path: output/biscuit/test.bed.gz
md5sum: a29fea6ad74453ec94f8220747dab906
- path: output/biscuit/versions.yml
md5sum: f2f7c4ff3c6a135b1c8a3aff24a44d81
- name: biscuit epiread test_biscuit_epiread_snp_decompress
command: nextflow run tests/modules/biscuit/epiread -entry test_biscuit_epiread_snp_decompress -c tests/config/nextflow.config
tags:
- biscuit
- biscuit/epiread
files:
- path: output/biscuit/BiscuitIndex/genome.fasta
md5sum: 6e9fe4042a72f2345f644f239272b7e6
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.amb
md5sum: 3a68b8b2287e07dd3f5f95f4344ba76e
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.ann
md5sum: c32e11f6c859f166c7525a9c1d583567
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.pac
md5sum: 983e3d2cd6f36e2546e6d25a0da78d66
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.bwt
md5sum: a11bc31775f7b7a4f9cd3bc4f981661a
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.sa
md5sum: 9c9e07fa1c75ef32d764274579c89b08
- path: output/biscuit/BiscuitIndex/genome.fasta.par.bwt
md5sum: 62eb83cd557a47b59589713d98024fc2
- path: output/biscuit/BiscuitIndex/genome.fasta.par.sa
md5sum: 55bcd97d7059bf73dc0d221e36e8e901
- path: output/biscuit/test.bed.gz
md5sum: a29fea6ad74453ec94f8220747dab906
- path: output/biscuit/versions.yml
md5sum: cb0258ebf4e1a731a4310ec17c3dc442

View file

@@ -0,0 +1,12 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { BISCUIT_INDEX } from '../../../../modules/biscuit/index/main.nf'
workflow test_biscuit_index {
fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
BISCUIT_INDEX ( fasta )
}

View file

@@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}

View file

@@ -0,0 +1,24 @@
- name: biscuit index test_biscuit_index
command: nextflow run tests/modules/biscuit/index -entry test_biscuit_index -c tests/config/nextflow.config
tags:
- biscuit/index
- biscuit
files:
- path: output/biscuit/BiscuitIndex/genome.fasta
md5sum: 6e9fe4042a72f2345f644f239272b7e6
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.amb
md5sum: 3a68b8b2287e07dd3f5f95f4344ba76e
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.ann
md5sum: c32e11f6c859f166c7525a9c1d583567
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.pac
md5sum: 983e3d2cd6f36e2546e6d25a0da78d66
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.bwt
md5sum: a11bc31775f7b7a4f9cd3bc4f981661a
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.sa
md5sum: 9c9e07fa1c75ef32d764274579c89b08
- path: output/biscuit/BiscuitIndex/genome.fasta.par.bwt
md5sum: 62eb83cd557a47b59589713d98024fc2
- path: output/biscuit/BiscuitIndex/genome.fasta.par.sa
md5sum: 55bcd97d7059bf73dc0d221e36e8e901
- path: output/biscuit/versions.yml
md5sum: 5c5873e482a57966db246648ffddf62f

View file

@@ -0,0 +1,18 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { BISCUIT_INDEX } from '../../../../modules/biscuit/index/main.nf'
include { BISCUIT_MERGECG } from '../../../../modules/biscuit/mergecg/main.nf'
workflow test_biscuit_mergecg {
input = [
[ id:'test', single_end:false ], // meta map
file('https://github.com/nf-core/test-datasets/raw/modules/data/delete_me/biscuit/test-cg.bed.gz', checkIfExists: true)
]
fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
BISCUIT_INDEX( fasta )
BISCUIT_MERGECG ( input, BISCUIT_INDEX.out.index )
}

View file

@@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}

View file

@@ -0,0 +1,26 @@
- name: biscuit mergecg test_biscuit_mergecg
command: nextflow run tests/modules/biscuit/mergecg -entry test_biscuit_mergecg -c tests/config/nextflow.config
tags:
- biscuit
- biscuit/mergecg
files:
- path: output/biscuit/BiscuitIndex/genome.fasta
md5sum: 6e9fe4042a72f2345f644f239272b7e6
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.amb
md5sum: 3a68b8b2287e07dd3f5f95f4344ba76e
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.ann
md5sum: c32e11f6c859f166c7525a9c1d583567
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.pac
md5sum: 983e3d2cd6f36e2546e6d25a0da78d66
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.bwt
md5sum: a11bc31775f7b7a4f9cd3bc4f981661a
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.sa
md5sum: 9c9e07fa1c75ef32d764274579c89b08
- path: output/biscuit/BiscuitIndex/genome.fasta.par.bwt
md5sum: 62eb83cd557a47b59589713d98024fc2
- path: output/biscuit/BiscuitIndex/genome.fasta.par.sa
md5sum: 55bcd97d7059bf73dc0d221e36e8e901
- path: output/biscuit/test.bed.gz
md5sum: d693b28ddc81265f388860d391fc7c5b
- path: output/biscuit/versions.yml
md5sum: f670d63671af06bf8654677bf373b3a1

View file

@@ -0,0 +1,38 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { BISCUIT_INDEX } from '../../../../modules/biscuit/index/main.nf'
include { BISCUIT_PILEUP } from '../../../../modules/biscuit/pileup/main.nf'
workflow test_biscuit_pileup {
input = [ [ id:'test' ], // meta map
[file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_bam'], checkIfExists: true),
file(params.test_data['homo_sapiens']['illumina']['test2_paired_end_sorted_bam'], checkIfExists: true)],
[file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_bam_bai'], checkIfExists: true),
file(params.test_data['homo_sapiens']['illumina']['test2_paired_end_sorted_bam_bai'], checkIfExists: true)],
[], //tumor bam
[] //tumor bai
]
fasta = file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true)
BISCUIT_INDEX ( fasta )
BISCUIT_PILEUP ( input, BISCUIT_INDEX.out.index )
}
workflow test_biscuit_pileup_somatic {
input = [ [ id:'test' ], // meta map
file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_bam'], checkIfExists: true),
file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_bam_bai'], checkIfExists: true),
file(params.test_data['homo_sapiens']['illumina']['test2_paired_end_sorted_bam'], checkIfExists: true),
file(params.test_data['homo_sapiens']['illumina']['test2_paired_end_sorted_bam_bai'], checkIfExists: true)
]
fasta = file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true)
BISCUIT_INDEX ( fasta )
BISCUIT_PILEUP ( input, BISCUIT_INDEX.out.index )
}

View file

@@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}

View file

@@ -0,0 +1,53 @@
- name: biscuit pileup test_biscuit_pileup
command: nextflow run tests/modules/biscuit/pileup -entry test_biscuit_pileup -c tests/config/nextflow.config
tags:
- biscuit
- biscuit/pileup
files:
- path: output/biscuit/BiscuitIndex/genome.fasta
md5sum: f315020d899597c1b57e5fe9f60f4c3e
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.amb
md5sum: 1891c1de381b3a96d4e72f590fde20c1
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.ann
md5sum: 2df4aa2d7580639fa0fcdbcad5e2e969
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.pac
md5sum: 8569fbdb2c98c6fb16dfa73d8eacb070
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.bwt
md5sum: 668799eea40aefb8013cbf8ed6c47cfe
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.sa
md5sum: 10541b05bbea44d0344b0345a6522ba8
- path: output/biscuit/BiscuitIndex/genome.fasta.par.bwt
md5sum: 2c38edd64234420add133f5fe1ff975d
- path: output/biscuit/BiscuitIndex/genome.fasta.par.sa
md5sum: 7deee1aac3395d93bef1df11ab38379e
- path: output/biscuit/test.vcf.gz
md5sum: ef9798c318ead0f8a79ee7fdeb1ffbf9
- path: output/biscuit/versions.yml
md5sum: ae38b891fdbf9f7ff5c486408f949dc5
- name: biscuit pileup test_biscuit_pileup_somatic
command: nextflow run tests/modules/biscuit/pileup -entry test_biscuit_pileup_somatic -c tests/config/nextflow.config
tags:
- biscuit
- biscuit/pileup
files:
- path: output/biscuit/BiscuitIndex/genome.fasta
md5sum: f315020d899597c1b57e5fe9f60f4c3e
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.amb
md5sum: 1891c1de381b3a96d4e72f590fde20c1
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.ann
md5sum: 2df4aa2d7580639fa0fcdbcad5e2e969
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.pac
md5sum: 8569fbdb2c98c6fb16dfa73d8eacb070
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.bwt
md5sum: 668799eea40aefb8013cbf8ed6c47cfe
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.sa
md5sum: 10541b05bbea44d0344b0345a6522ba8
- path: output/biscuit/BiscuitIndex/genome.fasta.par.bwt
md5sum: 2c38edd64234420add133f5fe1ff975d
- path: output/biscuit/BiscuitIndex/genome.fasta.par.sa
md5sum: 7deee1aac3395d93bef1df11ab38379e
- path: output/biscuit/test.vcf.gz
md5sum: 692b4a6191b08fabe5efa5abe00da420
- path: output/biscuit/versions.yml
md5sum: cc51fd498d67fdc7cc067686eb855b93

View file

@@ -0,0 +1,18 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { BISCUIT_INDEX } from '../../../../modules/biscuit/index/main.nf'
include { BISCUIT_QC } from '../../../../modules/biscuit/qc/main.nf'
workflow test_biscuit_qc {
input = [
[ id:'test', single_end:false ], // meta map
file(params.test_data['sarscov2']['illumina']['test_paired_end_methylated_sorted_bam'], checkIfExists: true)
]
fasta = file(params.test_data['sarscov2']['genome']['genome_fasta'], checkIfExists: true)
BISCUIT_INDEX( fasta )
BISCUIT_QC ( input, BISCUIT_INDEX.out.index )
}

View file

@@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}

View file

@@ -0,0 +1,38 @@
- name: biscuit qc test_biscuit_qc
command: nextflow run tests/modules/biscuit/qc -entry test_biscuit_qc -c tests/config/nextflow.config
tags:
- biscuit/qc
- biscuit
files:
- path: output/biscuit/BiscuitIndex/genome.fasta
md5sum: 6e9fe4042a72f2345f644f239272b7e6
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.amb
md5sum: 3a68b8b2287e07dd3f5f95f4344ba76e
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.ann
md5sum: c32e11f6c859f166c7525a9c1d583567
- path: output/biscuit/BiscuitIndex/genome.fasta.bis.pac
md5sum: 983e3d2cd6f36e2546e6d25a0da78d66
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.bwt
md5sum: a11bc31775f7b7a4f9cd3bc4f981661a
- path: output/biscuit/BiscuitIndex/genome.fasta.dau.sa
md5sum: 9c9e07fa1c75ef32d764274579c89b08
- path: output/biscuit/BiscuitIndex/genome.fasta.par.bwt
md5sum: 62eb83cd557a47b59589713d98024fc2
- path: output/biscuit/BiscuitIndex/genome.fasta.par.sa
md5sum: 55bcd97d7059bf73dc0d221e36e8e901
- path: output/biscuit/test_CpGRetentionByReadPos.txt
md5sum: 498b6c0af196fb34c8835371b9e9b68a
- path: output/biscuit/test_CpHRetentionByReadPos.txt
md5sum: a266942c5719cecab7f60f63cbe7335d
- path: output/biscuit/test_dup_report.txt
md5sum: 65bddf4fbe9e40d7c6c976060df53e3b
- path: output/biscuit/test_isize_table.txt
md5sum: aadf6f2e271abc334b6146cf164bdda3
- path: output/biscuit/test_mapq_table.txt
md5sum: c8adaac84bb8db3b7f48e1ed4fccad00
- path: output/biscuit/test_strand_table.txt
md5sum: 27068382ba6b2dbf313169a85c9dbb3a
- path: output/biscuit/test_totalReadConversionRate.txt
md5sum: 8f0c1fceaebfa74f2757720e3bc85fed
- path: output/biscuit/versions.yml
md5sum: a730fa4888e6882cf1b8ba92645b04ee

View file

@@ -0,0 +1,16 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { BISCUIT_VCF2BED } from '../../../../modules/biscuit/vcf2bed/main.nf'
workflow test_biscuit_vcf2bed {
input = [
[ id:'test', single_end:false ], // meta map
file('https://github.com/nf-core/test-datasets/raw/modules/data/delete_me/biscuit/test.vcf.gz', checkIfExists: true)
]
BISCUIT_VCF2BED ( input )
}

View file

@@ -0,0 +1,5 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
}

View file

@@ -0,0 +1,10 @@
- name: biscuit vcf2bed test_biscuit_vcf2bed
command: nextflow run tests/modules/biscuit/vcf2bed -entry test_biscuit_vcf2bed -c tests/config/nextflow.config
tags:
- biscuit/vcf2bed
- biscuit
files:
- path: output/biscuit/test.bed.gz
md5sum: e2dd492289dc8463f364285e31b9553a
- path: output/biscuit/versions.yml
md5sum: cd784276e2fb6739d55e1b60d12202cd

View file

@@ -0,0 +1,15 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { CNVPYTOR_CALLCNVS } from '../../../../modules/cnvpytor/callcnvs/main.nf'
workflow test_cnvpytor_callcnvs {
input = [
[ id:'test'], // meta map
file(params.test_data['homo_sapiens']['illumina']['test_pytor'], checkIfExists: true)
]
CNVPYTOR_CALLCNVS ( input )
}

View file

@@ -0,0 +1,7 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
withName: CNVPYTOR_CALLCNVS {
ext.args = '10000'
}
}

View file

@@ -0,0 +1,10 @@
- name: cnvpytor callcnvs test_cnvpytor_callcnvs
command: nextflow run tests/modules/cnvpytor/callcnvs -entry test_cnvpytor_callcnvs -c tests/config/nextflow.config
tags:
- cnvpytor
- cnvpytor/callcnvs
files:
- path: output/cnvpytor/calls.10000.tsv
md5sum: d41d8cd98f00b204e9800998ecf8427e
- path: output/cnvpytor/versions.yml
md5sum: 5fe6ca3ef5c40f9dbf487f28db237821

View file

@@ -0,0 +1,15 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { CNVPYTOR_HISTOGRAM } from '../../../../modules/cnvpytor/histogram/main.nf'
workflow test_cnvpytor_histogram {
input = [
[ id:'test'], // meta map
file(params.test_data['homo_sapiens']['illumina']['test_pytor'], checkIfExists: true),
]
CNVPYTOR_HISTOGRAM ( input )
}

View file

@@ -0,0 +1,7 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
withName: CNVPYTOR_HISTOGRAM {
ext.args = '10000 100000'
}
}

View file

@@ -0,0 +1,10 @@
- name: cnvpytor histogram test_cnvpytor_histogram
command: nextflow run tests/modules/cnvpytor/histogram -entry test_cnvpytor_histogram -c tests/config/nextflow.config
tags:
- cnvpytor
- cnvpytor/histogram
files:
- path: output/cnvpytor/test.pytor
md5sum: aa03a8fa15b39f77816705a48e10312a
- path: output/cnvpytor/versions.yml
md5sum: 9a4b176afd5f1a3edeb37eeb301cf464

View file

@@ -0,0 +1,32 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { CNVPYTOR_IMPORTREADDEPTH } from '../../../../modules/cnvpytor/importreaddepth/main.nf'
workflow test_cnvpytor_importreaddepth {
input = [
[ id: 'test' ], // meta map
file(params.test_data['homo_sapiens']['illumina']['test2_paired_end_sorted_bam'], checkIfExists: true),
file(params.test_data['homo_sapiens']['illumina']['test2_paired_end_sorted_bam_bai'], checkIfExists: true)
]
CNVPYTOR_IMPORTREADDEPTH (input, [], [])
}
workflow test_cnvpytor_importreaddepth_cram {
input = [
[ id: 'test' ], // meta map
file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_cram'], checkIfExists: true),
file(params.test_data['homo_sapiens']['illumina']['test_paired_end_sorted_cram_crai'], checkIfExists: true)
]
fasta = file(params.test_data['homo_sapiens']['genome']['genome_fasta'], checkIfExists: true)
fai = file(params.test_data['homo_sapiens']['genome']['genome_fasta_fai'], checkIfExists: true)
CNVPYTOR_IMPORTREADDEPTH (input, fasta, fai)
}

View file

@@ -0,0 +1,12 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
withName: CNVPYTOR_IMPORTREADDEPTH {
ext.args = {params.cnvpytor_chr ? "-chrom ${params.cnvpytor_chr}" : '' }
}
}
params {
cnvpytor_chr = '' // specifies chromosome name(s) the same way as they are described in the sam/bam/cram header e.g. '1 2' or 'chr1 chr2'.
}
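As the comment above notes, the -chrom argument is only added when cnvpytor_chr is non-empty. A minimal sketch of overriding it for a run restricted to two chromosomes — the chromosome names below are illustrative and must match the BAM/CRAM header:

params {
    // illustrative override; ext.args then resolves to "-chrom chr21 chr22"
    cnvpytor_chr = 'chr21 chr22'
}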

View file

@@ -0,0 +1,15 @@
#!/usr/bin/env nextflow
nextflow.enable.dsl = 2
include { CNVPYTOR_PARTITION } from '../../../../modules/cnvpytor/partition/main.nf'
workflow test_cnvpytor_partition {
input = [
[ id:'test'], // meta map
file(params.test_data['homo_sapiens']['illumina']['test_pytor'], checkIfExists: true)
]
CNVPYTOR_PARTITION ( input )
}

View file

@@ -0,0 +1,7 @@
process {
publishDir = { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }
withName: CNVPYTOR_PARTITION {
ext.args = '10000 100000'
}
}

View file

@@ -0,0 +1,10 @@
- name: cnvpytor partition test_cnvpytor_partition
command: nextflow run tests/modules/cnvpytor/partition -entry test_cnvpytor_partition -c tests/config/nextflow.config
tags:
- cnvpytor
- cnvpytor/partition
files:
- path: output/cnvpytor/test.pytor
md5sum: aa03a8fa15b39f77816705a48e10312a
- path: output/cnvpytor/versions.yml
md5sum: 8a04506554c58cd170cc050fd9904c6f

View file

@@ -6,7 +6,8 @@ include { OPTITYPE } from '../../../modules/optitype/main.nf'
workflow test_optitype {
input = [ [ id:'test', seq_type:'dna' ], // meta map
-file(params.test_data['homo_sapiens']['illumina']['test_paired_end_hla'], checkIfExists: true)
+file(params.test_data['homo_sapiens']['illumina']['test_paired_end_hla_sorted_bam'], checkIfExists: true),
+file(params.test_data['homo_sapiens']['illumina']['test_paired_end_hla_sorted_bam_bai'], checkIfExists: true)
]
OPTITYPE ( input )

View file

@@ -6,4 +6,4 @@
- path: output/optitype/test/test_coverage_plot.pdf
- path: output/optitype/test/test_result.tsv
contains:
-- "1446"
+- "1439"