Mirror of https://github.com/MillironX/nf-configs.git (synced 2024-11-24 17:19:54 +00:00)

Commit 1ff92db608: Merge branch 'nf-core:master' into master

27 changed files with 358 additions and 135 deletions
.github/workflows/linting.yml (vendored, 4 changes)

@@ -7,9 +7,7 @@ jobs:
     runs-on: ubuntu-18.04
     steps:
       - uses: actions/checkout@v1
-      - uses: actions/setup-node@v1
-        with:
-          node-version: '10'
+      - uses: actions/setup-node@v2
       - name: Install markdownlint
         run: |
           npm install -g markdownlint-cli
.github/workflows/main.yml (vendored, 2 changes)

@@ -47,13 +47,13 @@ jobs:
           - 'icr_davros'
           - 'ifb_core'
           - 'imperial'
-          - 'imperial_mb'
           - 'jax'
           - 'lugh'
           - 'maestro'
           - 'mpcdf'
           - 'munin'
           - 'nu_genomics'
+          - 'nihbiowulf'
           - 'oist'
           - 'pasteur'
           - 'phoenix'
@@ -113,12 +113,14 @@ Currently documentation is available for the following systems:
 * [HASTA](docs/hasta.md)
 * [HEBBE](docs/hebbe.md)
 * [ICR_DAVROS](docs/icr_davros.md)
+* [IMPERIAL](docs/imperial.md)
 * [JAX](docs/jax.md)
 * [LUGH](docs/lugh.md)
 * [MAESTRO](docs/maestro.md)
 * [MPCDF](docs/mpcdf.md)
 * [MUNIN](docs/munin.md)
 * [NU_GENOMICS](docs/nu_genomics.md)
+* [NIHBIOWULF](docs/nihbiowulf.md)
 * [OIST](docs/oist.md)
 * [PASTEUR](docs/pasteur.md)
 * [PHOENIX](docs/phoenix.md)

@@ -190,6 +192,8 @@ Currently documentation is available for the following pipelines within specific
 * sarek
   * [MUNIN](docs/pipeline/sarek/munin.md)
   * [UPPMAX](docs/pipeline/sarek/uppmax.md)
+* rnavar
+  * [MUNIN](docs/pipeline/rnavar/munin.md)

 ### Pipeline-specific documentation
@@ -1,6 +1,6 @@
 //Nextflow config file for running on Azure batch
 params {
-  config_profile_description = 'AWSBATCH Cloud Profile'
+  config_profile_description = 'Azure BATCH Cloud Profile'
  config_profile_contact = 'Venkat Malladi (@vsmalladi)'
  config_profile_url = 'https://azure.microsoft.com/services/batch/'
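For orientation, a hedged launch sketch for this profile: Azure Batch runs generally need a blob-container work directory, and the pipeline name and `az://` paths below are placeholders, not part of this commit.

```bash
# Illustrative only: launch an nf-core pipeline on Azure Batch with this profile.
# The pipeline name and the az:// container paths are assumed examples.
nextflow run nf-core/rnaseq -profile azurebatch \
    -w 'az://my-container/work' \
    --outdir 'az://my-container/results'
```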
@@ -7,11 +7,9 @@ params {

 process {
   executor = 'slurm'
-  queue = { task.memory <= 170.GB ? 'c' : 'm' }
+  queue = { task.memory <= 120.GB ? 'c' : 'm' }
-  module = 'anaconda3/2019.10'
+  module = ['build-env/.f2021', 'build-env/f2021', 'anaconda3/2021.11']
-  // --signal option will be handled by nextflow after 21.10.0 release (see https://github.com/nextflow-io/nextflow/issues/2163)
-  clusterOptions = { '--signal B:USR2 ' << ( (queue == 'c' & task.time <= 1.h) ? '--qos rapid' : ( task.time <= 8.h ? '--qos short': ( task.time <= 48.h ? '--qos medium' : '--qos long' ) ) ) }
+  clusterOptions = { ( task.queue == 'g' ? '--gres gpu:1 ' : '' ) << ( (task.queue == 'c' & task.time <= 1.h) ? '--qos rapid' : ( task.time <= 8.h ? '--qos short': ( task.time <= 48.h ? '--qos medium' : '--qos long' ) ) ) }
 }

 singularity {
@@ -30,7 +30,7 @@ profiles {
     config_profile_description = 'MPI-EVA archgen profile, provided by nf-core/configs.'
     max_memory = 256.GB
     max_cpus = 32
-    max_time = 720.h
+    max_time = 365.d
     //Illumina iGenomes reference file path
   }
@@ -1,37 +1,80 @@
 //Profile config names for nf-core/configs

 params {
   // Config Params
   config_profile_description = 'Imperial College London - HPC Profile -- provided by nf-core/configs.'
   config_profile_contact = 'Combiz Khozoie (c.khozoie@imperial.ac.uk)'
   config_profile_url = 'https://www.imperial.ac.uk/admin-services/ict/self-service/research-support/rcs/'

   // Resources
-  max_memory = 256.GB
-  max_cpus = 32
-  max_time = 72.h
+  max_memory = 480.GB
+  max_cpus = 40
+  max_time = 1000.h
+}
+
+profiles {
+  imperial {
+    process {
+      executor = 'pbspro'
+
+      // Process-specific resource requirements
+      withLabel:process_low {
+        // TARGET QUEUE: throughput
+        cpus = { 2 * task.attempt }
+        memory = { 12.GB * task.attempt }
+        time = { 4.h * task.attempt }
+      }
+      withLabel:process_medium {
+        // TARGET QUEUE: throughput
+        cpus = 8
+        memory = { 32.GB * task.attempt }
+        time = { 8.h * task.attempt }
+      }
+      withLabel:process_high {
+        // TARGET QUEUE: general
+        cpus = 32
+        memory = { 62.GB * task.attempt }
+        time = { 16.h * task.attempt }
+      }
+      withLabel:process_long {
+        // TARGET QUEUE: long
+        cpus = 8
+        memory = 96.GB
+        time = { 72.h * task.attempt }
+      }
+      withLabel:process_high_memory {
+        // TARGET QUEUE: large memory
+        cpus = { 10 * task.attempt }
+        memory = { 120.GB * task.attempt }
+        time = { 12.h * task.attempt }
+      }
+    }
+  }
+  medbio {
+    process {
+      executor = 'pbspro'
+
+      queue = 'pqmedbio-tput'
+
+      //queue = 'med-bio' //!! this is an alias and shouldn't be used
+    }
+  }
 }

 executor {
   $pbspro {
     queueSize = 50
   }

   $local {
     cpus = 2
     queueSize = 1
-    memory = '32 GB'
+    memory = '6 GB'
   }
 }

 singularity {
   enabled = true
   autoMounts = true
   runOptions = "-B /rds/,/rds/general/user/$USER/ephemeral/tmp/:/tmp,/rds/general/user/$USER/ephemeral/tmp/:/var/tmp"
-}
-
-process {
-
-  executor = 'pbspro'
-
 }
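As a usage sketch of the nested profiles added above, grounded in the updated Imperial docs later in this commit; the pipeline name is a placeholder.

```bash
# Illustrative: combine the institutional config with one of its internal profiles.
# '-profile imperial,medbio' routes jobs to the MEDBIO queue, replacing the
# separate imperial_mb config deleted below; the pipeline name is an assumed example.
nextflow run nf-core/scflow -profile imperial,medbio
```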
@@ -1,44 +0,0 @@
-//Profile config names for nf-core/configs
-
-params {
-  // Config Params
-  config_profile_description = 'Imperial College London - MEDBIO QUEUE - HPC Profile -- provided by nf-core/configs.'
-  config_profile_contact = 'Combiz Khozoie (c.khozoie@imperial.ac.uk)'
-  config_profile_url = 'https://www.imperial.ac.uk/bioinformatics-data-science-group/resources/uk-med-bio/'
-
-  // Resources
-  max_memory = 640.GB
-  max_cpus = 32
-  max_time = 168.h
-}
-
-executor {
-  $pbspro {
-    queueSize = 50
-  }
-
-  $local {
-    cpus = 2
-    queueSize = 1
-    memory = '32 GB'
-  }
-}
-
-singularity {
-  enabled = true
-  autoMounts = true
-  runOptions = "-B /rds/,/rdsgpfs/,/rds/general/user/$USER/ephemeral/tmp/:/tmp,/rds/general/user/$USER/ephemeral/tmp/:/var/tmp"
-}
-
-process {
-
-  executor = 'pbspro'
-  queue = 'pqmedbio-tput'
-
-  //queue = 'med-bio' //!! this is an alias and shouldn't be used
-
-  withLabel:process_large {
-    queue = 'pqmedbio-large'
-  }
-
-}
@@ -18,6 +18,8 @@ profiles {

       executor {
         queueSize = 8
+        pollInterval = '1 min'
+        queueStatInterval = '5 min'
       }

       // Set $NXF_SINGULARITY_CACHEDIR in your ~/.bash_profile

@@ -46,6 +48,8 @@ profiles {

       executor {
         queueSize = 8
+        pollInterval = '1 min'
+        queueStatInterval = '5 min'
       }

       // Set $NXF_SINGULARITY_CACHEDIR in your ~/.bash_profile

@@ -58,7 +62,7 @@ profiles {
       params {
         config_profile_description = 'MPCDF raven profile (unofficially) provided by nf-core/configs.'
         max_memory = 368.GB
-        max_cpus = 192
+        max_cpus = 72
         max_time = 24.h
       }
   }
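A hedged sketch of selecting one of the per-cluster profiles this config exposes (the raven profile appears in the hunk above); the pipeline name and the profile-combination syntax are assumptions based on common nf-core/configs practice.

```bash
# Illustrative: pick the raven cluster profile within the mpcdf config.
nextflow run nf-core/mag -profile mpcdf,raven
```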
@@ -21,8 +21,9 @@ process {
 }

 singularity {
-  enabled = true
-  cacheDir = '/data1/containers/'
+  cacheDir = '/data1/containers/'
+  enabled = true
+  runOptions = "--bind /media/BTB_2021_01"
 }

 // To use docker, use nextflow run -profile munin,docker
conf/nihbiowulf.config (new file, 42 lines)

@@ -0,0 +1,42 @@
+//Profile config names for nf-core/configs
+params {
+  config_profile_description = 'National Institutes of Health, USA: Biowulf nf-core config'
+  config_profile_contact = 'Kevin Brick (@kevbrick)'
+  config_profile_url = 'https://hpc.nih.gov/apps/nextflow.html'
+  max_memory = 224.GB
+  max_cpus = 32
+  max_time = 72.h
+
+  igenomes_base = '/fdb/igenomes/'
+}
+
+process {
+  scratch = '/lscratch/$SLURM_JOBID'
+  maxForks = 100
+}
+
+profiles {
+  local {
+    process.executor = 'local'
+  }
+
+  slurm {
+    process.executor = 'slurm'
+    executor.$slurm.pollInterval = '1 min'
+    executor.$slurm.queueStatInterval = '5 min'
+    executor.queueSize = 100
+    executor.$slurm.submitRateLimit = '6/1min'
+    process.clusterOptions = ' --gres=lscratch:600 --signal USR2@20'
+  }
+}
+
+singularity {
+  enabled = true
+  autoMounts = true
+  envWhitelist='https_proxy,http_proxy,ftp_proxy,DISPLAY,SLURM_JOBID'
+
+  //As names change, the bind paths required may change too. To check that everything is being captured:
+  //Run '. /usr/local/current/singularity/app_conf/sing_binds' to populate $SINGULARITY_BINDPATH
+  //Check that each folder in $SINGULARITY_BINDPATH is listed with -B in the runOptions below. If not, add it.
+  runOptions = ' -B /gs10 -B /gs11 -B /gs12 -B /gs4 -B /gs6 -B /gs7 -B /gs8 -B /gs9 -B /vf -B /spin1 -B /data -B /fdb -B /lscratch -B /fdb/igenomes/ --env TMPDIR="/lscratch/$SLURM_JOBID" '
+}
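A hedged usage sketch for the new profile: the local and slurm sub-profiles are defined above, while the pipeline name is a placeholder.

```bash
# Illustrative: run on Biowulf using the slurm executor sub-profile defined above,
# picking up references from /fdb/igenomes/ via the igenomes_base setting.
nextflow run nf-core/sarek -profile nihbiowulf,slurm
```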
@@ -19,57 +19,60 @@ process {
   withLabel:'sc_tiny'{
     cpus = { check_max( 1, 'cpus' ) }
     memory = { check_max( 1.GB * task.attempt, 'memory' ) }
-    time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
+    time = '365.d'
   }

   withLabel:'sc_small'{
     cpus = { check_max( 1, 'cpus' ) }
     memory = { check_max( 4.GB * task.attempt, 'memory' ) }
-    time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
+    time = '365.d'
   }

   withLabel:'sc_medium'{
     cpus = { check_max( 1, 'cpus' ) }
     memory = { check_max( 8.GB * task.attempt, 'memory' ) }
-    time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
+    time = '365.d'
   }

   withLabel:'mc_small'{
     cpus = { check_max( 2, 'cpus' ) }
     memory = { check_max( 4.GB * task.attempt, 'memory' ) }
-    time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
+    time = '365.d'
   }

   withLabel:'mc_medium' {
     cpus = { check_max( 4, 'cpus' ) }
     memory = { check_max( 8.GB * task.attempt, 'memory' ) }
-    time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
+    time = '365.d'
   }

   withLabel:'mc_large'{
     cpus = { check_max( 8, 'cpus' ) }
     memory = { check_max( 16.GB * task.attempt, 'memory' ) }
-    time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
+    time = '365.d'
   }

   withLabel:'mc_huge'{
     cpus = { check_max( 32, 'cpus' ) }
     memory = { check_max( 256.GB * task.attempt, 'memory' ) }
-    time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
+    time = '365.d'
   }

   // Fixes for SGE and Java incompatibility due to Java using more memory than you tell it to use

   withName: makeSeqDict {
     clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G,virtual_free=${(task.memory.toGiga() * 2)}G" }
+    errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
   }

   withName: fastqc {
     clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G,virtual_free=${(task.memory.toGiga() * 2)}G" }
+    errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
   }

   withName: adapter_removal {
     clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G,virtual_free=${(task.memory.toGiga() * 2)}G" }
+    errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
   }

   withName: samtools_flagstat {
@@ -82,10 +85,13 @@ process {

   withName: dedup {
     clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G,virtual_free=${(task.memory.toGiga() * 2)}G" }
+    errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
   }

   withName: markduplicates {
     clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G,virtual_free=${(task.memory.toGiga() * 2)}G" }
+    memory = { check_max( 20.GB * task.attempt, 'memory' ) }
+    errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
   }

   withName: library_merge {
@@ -100,6 +106,10 @@ process {
     clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G,virtual_free=${(task.memory.toGiga() * 2)}G" }
     memory = { check_max( 4.GB * task.attempt, 'memory' ) }
   }

+  withName: metagenomic_complexity_filter {
+    clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G,virtual_free=${(task.memory.toGiga() * 2)}G" }
+  }
+
   withName: malt {
     clusterOptions = { "-S /bin/bash -V -l h_vmem=1000G,virtual_free=1000G" }
@@ -109,34 +119,43 @@ process {

   withName: maltextract {
     clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G,virtual_free=${(task.memory.toGiga() * 2)}G" }
+    errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
   }

   withName: multivcfanalyzer {
     clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G,virtual_free=${(task.memory.toGiga() * 2)}G" }
+    errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
   }

   withName: mtnucratio {
     clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G,virtual_free=${(task.memory.toGiga() * 2)}G" }
+    errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
   }

   withName: vcf2genome {
     clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G,virtual_free=${(task.memory.toGiga() * 2)}G" }
+    errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
   }

   withName: qualimap {
     clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G,virtual_free=${(task.memory.toGiga() * 2)}G" }
+    errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : task.exitStatus in [255] ? 'ignore' : 'finish' }
   }

   withName: damageprofiler {
     clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G,virtual_free=${(task.memory.toGiga() * 2)}G" }
+    memory = { check_max( 8.GB * task.attempt, 'memory' ) }
+    errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
   }

   withName: circularmapper {
     clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G,virtual_free=${(task.memory.toGiga() * 2)}G" }
+    errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
   }

   withName: circulargenerator {
     clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G,virtual_free=${(task.memory.toGiga() * 2)}G" }
+    errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
   }

   withName: preseq {
@@ -146,6 +165,7 @@ process {

   withName: genotyping_ug {
     clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G,virtual_free=${(task.memory.toGiga() * 2)}G" }
+    errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
   }

 }
@@ -176,57 +196,53 @@ profiles {
       withLabel:'sc_tiny'{
         cpus = { check_max( 1, 'cpus' ) }
         memory = { check_max( 1.5.GB * task.attempt, 'memory' ) }
-        time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
       }

       withLabel:'sc_small'{
         cpus = { check_max( 1, 'cpus' ) }
         memory = { check_max( 6.GB * task.attempt, 'memory' ) }
-        time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
       }

       withLabel:'sc_medium'{
         cpus = { check_max( 1, 'cpus' ) }
         memory = { check_max( 12.GB * task.attempt, 'memory' ) }
-        time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
       }

       withLabel:'mc_small'{
         cpus = { check_max( 2, 'cpus' ) }
         memory = { check_max( 6.GB * task.attempt, 'memory' ) }
-        time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
       }

       withLabel:'mc_medium' {
         cpus = { check_max( 4, 'cpus' ) }
         memory = { check_max( 12.GB * task.attempt, 'memory' ) }
-        time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
       }

       withLabel:'mc_large'{
         cpus = { check_max( 8, 'cpus' ) }
         memory = { check_max( 24.GB * task.attempt, 'memory' ) }
-        time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
       }

       withLabel:'mc_huge'{
         cpus = { check_max( 32, 'cpus' ) }
         memory = { check_max( 256.GB * task.attempt, 'memory' ) }
-        time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
       }

       // Fixes for SGE and Java incompatibility due to (and also some samtools?!) using more memory than you tell it to use

       withName: makeSeqDict {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G,virtual_free=${(task.memory.toGiga() * 3)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: fastqc {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G,virtual_free=${(task.memory.toGiga() * 3)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: adapter_removal {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G,virtual_free=${(task.memory.toGiga() * 3)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: samtools_flagstat {
@@ -239,10 +255,13 @@ profiles {

       withName: dedup {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G,virtual_free=${(task.memory.toGiga() * 3)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: markduplicates {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G,virtual_free=${(task.memory.toGiga() * 3)}G" }
+        memory = { check_max( 32.GB * task.attempt, 'memory' ) }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: library_merge {
@@ -258,51 +277,65 @@ profiles {
         memory = { check_max( 4.GB * task.attempt, 'memory' ) }
       }

+      withName: metagenomic_complexity_filter {
+        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G,virtual_free=${(task.memory.toGiga() * 3)}G" }
+      }
+
       withName: malt {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=1000G,virtual_free=1000G" }
         cpus = { check_max( 32, 'cpus' ) }
         memory = { check_max( 955.GB * task.attempt, 'memory' ) }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName:hostremoval_input_fastq {
         memory = { check_max( 32.GB * task.attempt, 'memory' ) }
-        time = 1440.h
       }

       withName: maltextract {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G,virtual_free=${(task.memory.toGiga() * 3)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: multivcfanalyzer {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G,virtual_free=${(task.memory.toGiga() * 3)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: mtnucratio {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G,virtual_free=${(task.memory.toGiga() * 3)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: vcf2genome {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G,virtual_free=${(task.memory.toGiga() * 3)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: qualimap {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G,virtual_free=${(task.memory.toGiga() * 3)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : task.exitStatus in [255] ? 'ignore' : 'finish' }
       }

       withName: damageprofiler {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G,virtual_free=${(task.memory.toGiga() * 3)}G" }
+        memory = { check_max( 16.GB * task.attempt, 'memory' ) }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: circularmapper {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G,virtual_free=${(task.memory.toGiga() * 3)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: circulargenerator {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G,virtual_free=${(task.memory.toGiga() * 3)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: genotyping_ug {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G,virtual_free=${(task.memory.toGiga() * 3)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: preseq {
@@ -337,57 +370,53 @@ profiles {
       withLabel:'sc_tiny'{
         cpus = { check_max( 1, 'cpus' ) }
         memory = { check_max( 2.GB * task.attempt, 'memory' ) }
-        time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
       }

       withLabel:'sc_small'{
         cpus = { check_max( 1, 'cpus' ) }
         memory = { check_max( 8.GB * task.attempt, 'memory' ) }
-        time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
       }

       withLabel:'sc_medium'{
         cpus = { check_max( 1, 'cpus' ) }
         memory = { check_max( 16.GB * task.attempt, 'memory' ) }
-        time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
       }

       withLabel:'mc_small'{
         cpus = { check_max( 2, 'cpus' ) }
         memory = { check_max( 8.GB * task.attempt, 'memory' ) }
-        time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
       }

       withLabel:'mc_medium' {
         cpus = { check_max( 4, 'cpus' ) }
         memory = { check_max( 16.GB * task.attempt, 'memory' ) }
-        time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
       }

       withLabel:'mc_large'{
         cpus = { check_max( 8, 'cpus' ) }
         memory = { check_max( 32.GB * task.attempt, 'memory' ) }
-        time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
       }

       withLabel:'mc_huge'{
         cpus = { check_max( 32, 'cpus' ) }
         memory = { check_max( 512.GB * task.attempt, 'memory' ) }
-        time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
       }

       // Fixes for SGE and Java incompatibility due to Java using more memory than you tell it to use

       withName: makeSeqDict {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G,virtual_free=${(task.memory.toGiga() * 6)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: fastqc {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G,virtual_free=${(task.memory.toGiga() * 6)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: adapter_removal {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G,virtual_free=${(task.memory.toGiga() * 6)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: samtools_flagstat {
@@ -400,10 +429,13 @@ profiles {

       withName: dedup {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G,virtual_free=${(task.memory.toGiga() * 6)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: markduplicates {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G,virtual_free=${(task.memory.toGiga() * 6)}G" }
+        memory = { check_max( 48.GB * task.attempt, 'memory' ) }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: library_merge {
@@ -422,49 +454,63 @@ profiles {

       withName:hostremoval_input_fastq {
         memory = { check_max( 32.GB * task.attempt, 'memory' ) }
-        time = 1440.h
       }
+
+      withName: metagenomic_complexity_filter {
+        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G,virtual_free=${(task.memory.toGiga() * 6)}G" }
+      }

       withName: malt {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=1000G,virtual_free=1000G" }
         cpus = { check_max( 32, 'cpus' ) }
         memory = { check_max( 955.GB * task.attempt, 'memory' ) }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: maltextract {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G,virtual_free=${(task.memory.toGiga() * 6)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: multivcfanalyzer {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G,virtual_free=${(task.memory.toGiga() * 6)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: mtnucratio {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G,virtual_free=${(task.memory.toGiga() * 6)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: vcf2genome {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G,virtual_free=${(task.memory.toGiga() * 6)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: qualimap {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G,virtual_free=${(task.memory.toGiga() * 6)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : task.exitStatus in [255] ? 'ignore' : 'finish' }
       }

       withName: damageprofiler {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G,virtual_free=${(task.memory.toGiga() * 6)}G" }
+        memory = { check_max( 32.GB * task.attempt, 'memory' ) }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: circularmapper {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G,virtual_free=${(task.memory.toGiga() * 6)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: circulargenerator {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G,virtual_free=${(task.memory.toGiga() * 6)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: genotyping_ug {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G,virtual_free=${(task.memory.toGiga() * 6)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
       }

       withName: preseq {
@@ -1,8 +1,7 @@
 process {

   withName:'PICARD_MARKDUPLICATES' {
-    cpus = { check_max( 13 * task.attempt, 'cpus' ) }
-    memory = { check_max( 130.GB * task.attempt, 'memory' ) }
+    memory = { check_max( 90.GB * task.attempt, 'memory' ) }
   }
   withName:'DEEPVARIANT' {
     cpus = { check_max( 16 * task.attempt, 'cpus' ) }
conf/pipeline/rnavar/munin.config (new file, 44 lines)

@@ -0,0 +1,44 @@
+// rnavar/munin specific profile config
+
+params {
+  // Specific nf-core/configs params
+  config_profile_contact = 'Praveen Raj (@praveenraj2018)'
+  config_profile_description = 'nf-core/rnavar MUNIN profile provided by nf-core/configs'
+  config_profile_url = 'https://ki.se/forskning/barntumorbanken'
+
+  // Specific nf-core/rnavar params
+
+  igenomes_ignore = true
+
+  // Genome references
+  genome = 'GRCh38'
+  fasta = '/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_genome.fa'
+  fasta_fai = '/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_genome.fa.fai'
+  gtf = '/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_annot.gtf'
+  gene_bed = '/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_annot.bed'
+
+  // Known genome resources
+  dbsnp = '/data1/references/annotations/GATK_bundle/dbsnp_146.hg38.vcf.gz'
+  dbsnp_tbi = '/data1/references/annotations/GATK_bundle/dbsnp_146.hg38.vcf.gz.tbi'
+  known_indels = '/data1/references/annotations/GATK_bundle/Mills_and_1000G_gold_standard.indels.hg38.vcf.gz'
+  known_indels_tbi = '/data1/references/annotations/GATK_bundle/Mills_and_1000G_gold_standard.indels.hg38.vcf.gz.tbi'
+
+  // STAR index
+  star_index = '/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/STAR.2.7.9a_2x151bp/'
+  read_length = 151
+
+  // Annotation settings
+  annotation_cache = true
+  cadd_cache = true
+  cadd_indels = '/data1/cache/CADD/v1.4/InDels.tsv.gz'
+  cadd_indels_tbi = '/data1/cache/CADD/v1.4/InDels.tsv.gz.tbi'
+  cadd_wg_snvs = '/data1/cache/CADD/v1.4/whole_genome_SNVs.tsv.gz'
+  cadd_wg_snvs_tbi = '/data1/cache/CADD/v1.4/whole_genome_SNVs.tsv.gz.tbi'
+  snpeff_cache = '/data1/cache/snpEff/'
+  snpeff_db = 'GRCh38.99'
+  vep_cache = '/data1/cache/VEP/'
+  vep_genome = 'GRCh38'
+  vep_species = 'homo_sapiens'
+  vep_cache_version = '99'
+
+}
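A hedged launch sketch for this new pipeline-specific profile: once the munin profile is selected, the GRCh38 references, STAR index and annotation caches above are picked up automatically. The samplesheet path is a placeholder.

```bash
# Illustrative: nf-core/rnavar on MUNIN; the input samplesheet is an assumed example.
nextflow run nf-core/rnavar -profile munin --input samplesheet.csv
```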
@@ -2,6 +2,8 @@ process {

   withName:MapReads {
     cpus = 16
+    memory = 128.GB
+    clusterOptions = {"-l h_vmem=${(task.memory + 8.GB).bytes/task.cpus}"}
   }
   withName:BuildDict {
     cpus = 1
@@ -23,7 +23,7 @@ params {
 // Specific nf-core/sarek process configuration
 process {
   withLabel:sentieon {
-    module = {params.sentieon ? 'sentieon/201911.00' : null}
+    module = {params.sentieon ? 'sentieon/202112.00' : null}
     container = {params.sentieon ? null : container}
   }
 }
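Because the `module` directive above is a closure over `params.sentieon`, the Sentieon environment module is only loaded when that flag is set at launch; otherwise the process falls back to its container. A hedged sketch, where the flag, profile name and invocation are assumptions based on sarek's documented Sentieon support:

```bash
# Illustrative: with --sentieon the closure resolves to 'sentieon/202112.00';
# without it, module stays null and the container is used instead.
nextflow run nf-core/sarek -profile munin --sentieon
```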
@@ -3,7 +3,7 @@
 params {
   // Config Params
   config_profile_description = 'Imperial College London - HPC - nf-core/scFlow Profile -- provided by nf-core/configs.'
-  config_profile_contact = 'Combiz Khozoie (c.khozoie@imperial.ac.uk)'
+  config_profile_contact = 'NA'

   // Analysis Resource Params
   ctd_folder = "/rds/general/user/$USER/projects/ukdrmultiomicsproject/live/Analyses/scFlowResources/refs/ctd"

@@ -15,4 +15,4 @@ singularity {
   autoMounts = true
   cacheDir = "/rds/general/user/$USER/projects/ukdrmultiomicsproject/live/.singularity-cache"
   runOptions = "-B /rds/,/rdsgpfs/,/rds/general/user/$USER/ephemeral/tmp/:/tmp,/rds/general/user/$USER/ephemeral/tmp/:/var/tmp"
 }
@@ -13,10 +13,18 @@ params {
       // Please use 'MN908947.3' if possible because all primer sets are available / have been pre-prepared relative to that assembly
       fasta = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/NC_045512.2/GCF_009858895.2_ASM985889v3_genomic.200409.fna.gz'
       gff = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/NC_045512.2/GCF_009858895.2_ASM985889v3_genomic.200409.gff.gz'
+      nextclade_dataset = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/MN908947.3/nextclade_sars-cov-2_MN908947_2022-01-18T12_00_00Z.tar.gz'
+      nextclade_dataset_name = 'sars-cov-2'
+      nextclade_dataset_reference = 'MN908947'
+      nextclade_dataset_tag = '2022-01-18T12:00:00Z'
     }
     'MN908947.3' {
       fasta = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/MN908947.3/GCA_009858895.3_ASM985889v3_genomic.200409.fna.gz'
       gff = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/MN908947.3/GCA_009858895.3_ASM985889v3_genomic.200409.gff.gz'
+      nextclade_dataset = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/MN908947.3/nextclade_sars-cov-2_MN908947_2022-01-18T12_00_00Z.tar.gz'
+      nextclade_dataset_name = 'sars-cov-2'
+      nextclade_dataset_reference = 'MN908947'
+      nextclade_dataset_tag = '2022-01-18T12:00:00Z'
       primer_sets {
         artic {
           '1' {

@@ -43,6 +51,12 @@ params {
             primer_bed = 'https://github.com/artic-network/artic-ncov2019/raw/master/primer_schemes/nCoV-2019/V4/SARS-CoV-2.scheme.bed'
             scheme = 'SARS-CoV-2'
           }
+          '4.1' {
+            fasta = 'https://github.com/artic-network/artic-ncov2019/raw/master/primer_schemes/nCoV-2019/V4.1/SARS-CoV-2.reference.fasta'
+            gff = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/MN908947.3/GCA_009858895.3_ASM985889v3_genomic.200409.gff.gz'
+            primer_bed = 'https://github.com/artic-network/artic-ncov2019/raw/master/primer_schemes/nCoV-2019/V4.1/SARS-CoV-2.scheme.bed'
+            scheme = 'SARS-CoV-2'
+          }
           '1200' {
             fasta = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/MN908947.3/primer_schemes/artic/nCoV-2019/V1200/nCoV-2019.reference.fasta'
             gff = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/MN908947.3/GCA_009858895.3_ASM985889v3_genomic.200409.gff.gz'
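A hedged sketch of how these keyed genome attributes are consumed: selecting a genome and primer-set version makes the pipeline resolve the matching fasta, gff, primer_bed and Nextclade dataset entries. Parameter names follow nf-core/viralrecon conventions, and the exact invocation is an assumed example.

```bash
# Illustrative: use the MN908947.3 entry together with the new ARTIC V4.1 scheme.
nextflow run nf-core/viralrecon \
    --genome 'MN908947.3' \
    --primer_set artic \
    --primer_set_version '4.1'
```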
@@ -9,7 +9,7 @@ Before running the pipeline you will need to load Nextflow using the environment
 ```bash
 ## Load Nextflow environment module
 module purge
-module load nextflow/19.04.0
+module load nextflow/21.10.6
 ```

 A local copy of the [AWS-iGenomes](https://registry.opendata.aws/aws-igenomes/) resource has been made available on CBE so you should be able to run the pipeline against any reference available in the `igenomes.config` specific to the nf-core pipeline. You can do this by simply using the `--genome <GENOME_ID>` parameter.
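To make the `--genome` usage above concrete, a hedged example; the pipeline name and genome ID are placeholders:

```bash
# Illustrative: run against a locally mirrored iGenomes reference on CBE.
nextflow run nf-core/rnaseq -profile cbe --genome GRCh38
```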
@@ -125,7 +125,7 @@ For Human and Mouse, we use [GENCODE](https://www.gencodegenes.org/) gene annotations

 ## High Priority Queue

-If you would like to run with the _High Priority_ queue, specify the `highpriority` config profile after `czbiohub_aws`. When applied after the main `czbiohub_aws` config, it overwrites the process `queue` identifier.
+If you would like to run with the *High Priority* queue, specify the `highpriority` config profile after `czbiohub_aws`. When applied after the main `czbiohub_aws` config, it overwrites the process `queue` identifier.

 To use it, submit your run with with `-profile czbiohub_aws,highpriority`.
6
docs/imperial.md

@@ -2,15 +2,17 @@

 All nf-core pipelines have been successfully configured for use on the CX1 cluster at Imperial College London HPC.

-To use, run the pipeline with `-profile imperial`. This will download and launch the [`imperial.config`](../conf/imperial.config) which has been pre-configured with a setup suitable for the CX1 cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
+To use, run the pipeline with `-profile imperial,standard`. This will download and launch the [`imperial.config`](../conf/imperial.config) which has been pre-configured with a setup suitable for the CX1 cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.

 Before running the pipeline you will need to load Nextflow using the environment module system on the CX1 cluster. You can do this by issuing the commands below:

 ```bash
 ## Load Nextflow and Singularity environment modules
-module load Nextflow
+module load anaconda3/personal
+conda install -c bioconda nextflow
 ```

 >NB: You will need an account to use the HPC cluster CX1 in order to run the pipeline. If in doubt contact IT.
 >NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
->NB: To submit jobs to the Imperial College MEDBIO cluster, use `-profile imperial_mb` instead.
+>NB: To submit jobs to the Imperial College MEDBIO cluster, use `-profile imperial,medbio` instead.
+>NB: You will need a restricted access account to use the HPC cluster MEDBIO.
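Taken together, a first run on CX1 might look like this sketch; the pipeline name and its parameters are illustrative placeholders:

```bash
## One-time setup on a CX1 login node, per the docs above.
module load anaconda3/personal
conda install -c bioconda nextflow

## Launch; Nextflow then submits jobs to the scheduler from the login node.
nextflow run nf-core/rnaseq -profile imperial,standard --input samplesheet.csv
```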
16
docs/imperial_mb.md

@@ -1,16 +0,0 @@
-# nf-core/configs: Imperial MEDBIO HPC Configuration
-
-All nf-core pipelines have been successfully configured for use on the MEDBIO cluster at Imperial College London HPC.
-
-To use, run the pipeline with `-profile imperial_mb`. This will download and launch the [`imperial_mb.config`](../conf/imperial_mb.config) which has been pre-configured with a setup suitable for the MEDBIO cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
-
-Before running the pipeline you will need to load Nextflow using the environment module system on the head node. You can do this by issuing the commands below:
-
-```bash
-## Load Nextflow and Singularity environment modules
-module load Nextflow
-```
-
->NB: You will need an account to use the HPC cluster MEDBIO in order to run the pipeline. Access to the MEDBIO queue is exclusive. If in doubt contact IT.
->NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
->NB: To submit jobs to the standard CX1 cluster at Imperial College, use `-profile imperial` instead.
25
docs/nihbiowulf.md
Normal file
@@ -0,0 +1,25 @@
+# nf-core/configs: nihbiowulf Configuration
+
+nf-core pipelines have been configured for use on the Biowulf cluster at the NIH.
+
+To use, run the pipeline with `-profile nihbiowulf`. This will download and launch the [`nihbiowulf.config`](../conf/nihbiowulf.config) which has been pre-configured with a setup suitable for the Biowulf cluster at NIH. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
+
+## Environment modules
+
+Before running the pipeline you will need to load Nextflow and Singularity using the environment module system on Biowulf. You can do this by issuing the commands below:
+
+```bash
+## Load Nextflow and Singularity environment modules
+module purge
+module load nextflow
+module load singularity
+```
+
+## iGenomes configuration
+
+A partial local copy of the iGenomes resource is available on Biowulf. This is a copy of the Illumina iGenomes resource from several years ago and is not up to date with the s3-hosted nf-core iGenomes. There are some structural differences of note; in particular, if using BWA, `igenomes.config` should be modified to specify the BWA version folder, otherwise the BWA module will fail to find an appropriate index. To date this is the only known issue, although functionality has not been extensively tested with iGenomes on Biowulf. Nonetheless, you should in theory be able to run the pipeline against any reference available in the `igenomes.config` specific to the nf-core pipeline.
+
+You can do this by simply using the `--genome <GENOME_ID>` parameter.
+
+>NB: You will need an account to use the HPC cluster on Biowulf in order to run the pipeline. If in doubt contact CIT.
+>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster. The master process submitting jobs should be run either as a batch job or on an interactive node - not on the Biowulf login node. If in doubt contact Biowulf staff.
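Since the master process should not run on the login node, a batch-job wrapper is one option. A minimal sketch assuming Biowulf's SLURM scheduler; the resource requests and pipeline name are placeholders, not from the docs above:

```bash
#!/bin/bash
## Hypothetical SLURM wrapper for the Nextflow master process on Biowulf.
#SBATCH --job-name=nf-master
#SBATCH --cpus-per-task=2
#SBATCH --mem=8g
#SBATCH --time=24:00:00

module purge
module load nextflow
module load singularity

nextflow run nf-core/rnaseq -profile nihbiowulf --genome GRCh37 --input samplesheet.csv
```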
49
docs/pipeline/rnavar/munin.md
Normal file
@@ -0,0 +1,49 @@
+# nf-core/configs: MUNIN rnavar specific configuration
+
+Extra specific configuration for the rnavar pipeline.
+
+## Usage
+
+To use, run the pipeline with `-profile munin`.
+
+This will download and launch the rnavar-specific [`munin.config`](../../../conf/pipeline/rnavar/munin.config) which has been pre-configured with a setup suitable for the `MUNIN` cluster.
+
+Example: `nextflow run nf-core/rnavar -profile munin`
+
+## rnavar specific configurations for MUNIN
+
+Specific configurations for `MUNIN` have been made for rnavar.
+
+Genome references
+
+* Path to `fasta`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_genome.fa`
+* Path to `fasta_fai`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_genome.fa.fai`
+* Path to `gtf`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_annot.gtf`
+* Path to `gene_bed`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_annot.bed`
+
+Known genome resources
+
+* Path to `dbsnp`: `/data1/references/annotations/GATK_bundle/dbsnp_146.hg38.vcf.gz`
+* Path to `dbsnp_tbi`: `/data1/references/annotations/GATK_bundle/dbsnp_146.hg38.vcf.gz.tbi`
+* Path to `known_indels`: `/data1/references/annotations/GATK_bundle/Mills_and_1000G_gold_standard.indels.hg38.vcf.gz`
+* Path to `known_indels_tbi`: `/data1/references/annotations/GATK_bundle/Mills_and_1000G_gold_standard.indels.hg38.vcf.gz.tbi`
+
+STAR index
+
+* Path to `star_index`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/STAR.2.7.9a_2x151bp/`
+* Params `read_length` set to `151`
+
+Variant annotation configurations
+
+* Params `annotation_cache` and `cadd_cache` set to `true`
+* Params `snpeff_db` set to `GRCh38.99`
+* Params `vep_cache_version` set to `99`
+* Params `vep_genome` set to `GRCh38`
+* Path to `snpeff_cache`: `/data1/cache/snpEff/`
+* Path to `vep_cache`: `/data1/cache/VEP/`
+* Path to `pon`: `/data1/PON/vcfs/BTB.PON.vcf.gz`
+* Path to `pon_index`: `/data1/PON/vcfs/BTB.PON.vcf.gz.tbi`
+* Path to `cadd_indels`: `/data1/cache/CADD/v1.4/InDels.tsv.gz`
+* Path to `cadd_indels_tbi`: `/data1/cache/CADD/v1.4/InDels.tsv.gz.tbi`
+* Path to `cadd_wg_snvs`: `/data1/cache/CADD/v1.4/whole_genome_SNVs.tsv.gz`
+* Path to `cadd_wg_snvs_tbi`: `/data1/cache/CADD/v1.4/whole_genome_SNVs.tsv.gz.tbi`
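In config terms, the values above would translate to roughly the following in `munin.config`. This is a condensed sketch derived from the list, not the verbatim file:

```nextflow
// Condensed sketch of conf/pipeline/rnavar/munin.config based on the
// values listed above; the real file may set more parameters.
params {
    fasta             = '/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_genome.fa'
    gtf               = '/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_annot.gtf'
    star_index        = '/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/STAR.2.7.9a_2x151bp/'
    read_length       = 151
    snpeff_db         = 'GRCh38.99'
    vep_cache_version = 99
    vep_genome        = 'GRCh38'
    annotation_cache  = true
    cadd_cache        = true
}
```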
2
nfcore_custom.config

@@ -41,13 +41,13 @@ profiles {
   icr_davros { includeConfig "${params.custom_config_base}/conf/icr_davros.config" }
   ifb_core { includeConfig "${params.custom_config_base}/conf/ifb_core.config" }
   imperial { includeConfig "${params.custom_config_base}/conf/imperial.config" }
-  imperial_mb { includeConfig "${params.custom_config_base}/conf/imperial_mb.config" }
   jax { includeConfig "${params.custom_config_base}/conf/jax.config" }
   lugh { includeConfig "${params.custom_config_base}/conf/lugh.config" }
   maestro { includeConfig "${params.custom_config_base}/conf/maestro.config" }
   marvin { includeConfig "${params.custom_config_base}/conf/Marvin.config" }
   mpcdf { includeConfig "${params.custom_config_base}/conf/mpcdf.config" }
   munin { includeConfig "${params.custom_config_base}/conf/munin.config" }
+  nihbiowulf { includeConfig "${params.custom_config_base}/conf/nihbiowulf.config" }
   nu_genomics { includeConfig "${params.custom_config_base}/conf/nu_genomics.config" }
   oist { includeConfig "${params.custom_config_base}/conf/oist.config" }
   pasteur { includeConfig "${params.custom_config_base}/conf/pasteur.config" }
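For context, each nf-core pipeline pulls this file in remotely at launch; the include in a pipeline's `nextflow.config` looks roughly like the standard nf-core boilerplate below (paraphrased from the nf-core template conventions, not part of this commit):

```nextflow
// Roughly the standard nf-core boilerplate that loads these institutional
// profiles; paraphrased, not taken from this commit.
params.custom_config_version = 'master'
params.custom_config_base    = "https://raw.githubusercontent.com/nf-core/configs/${params.custom_config_version}"

try {
    includeConfig "${params.custom_config_base}/nfcore_custom.config"
} catch (Exception e) {
    System.err.println("WARNING: Could not load nf-core/config profiles: ${params.custom_config_base}/nfcore_custom.config")
}
```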
13
pipeline/rnavar.config
Normal file
@@ -0,0 +1,13 @@
+/*
+ * -------------------------------------------------
+ * nfcore/rnavar custom profile Nextflow config file
+ * -------------------------------------------------
+ * Config options for custom environments.
+ * Cluster-specific config options should be saved
+ * in the conf/pipeline/rnavar folder and imported
+ * under a profile name here.
+ */
+
+profiles {
+  munin { includeConfig "${params.custom_config_base}/conf/pipeline/rnavar/munin.config" }
+}
1
pipeline/scflow.config

@@ -10,5 +10,4 @@

 profiles {
   imperial { includeConfig "${params.custom_config_base}/conf/pipeline/scflow/imperial.config" }
-  imperial_mb { includeConfig "${params.custom_config_base}/conf/pipeline/scflow/imperial.config" } // intended
 }