Mirror of https://github.com/MillironX/nf-configs.git (synced 2024-11-11 04:23:10 +00:00)

Commit 8174fe6d52: 24 changed files with 248 additions and 133 deletions

.github/workflows/linting.yml (vendored): 4 changed lines

@@ -7,9 +7,7 @@ jobs:
     runs-on: ubuntu-18.04
     steps:
       - uses: actions/checkout@v1
-      - uses: actions/setup-node@v1
-        with:
-          node-version: '10'
+      - uses: actions/setup-node@v2
       - name: Install markdownlint
         run: |
           npm install -g markdownlint-cli

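The updated workflow still installs markdownlint-cli globally before linting. To reproduce that check locally before pushing, a minimal sketch is shown below; the glob and ignore pattern are assumptions for illustration, not taken from this commit.

```bash
# Reproduce the CI markdown lint step locally; glob and ignore pattern are assumptions
npm install -g markdownlint-cli
markdownlint '**/*.md' --ignore node_modules
```
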
.github/workflows/main.yml (vendored): 3 changed lines

@@ -32,6 +32,7 @@ jobs:
           - 'ccga_med'
           - 'cfc'
           - 'cfc_dev'
+          - 'cheaha'
           - 'computerome'
           - 'crick'
           - 'denbi_qbic'
@@ -47,9 +48,9 @@ jobs:
           - 'icr_davros'
           - 'ifb_core'
           - 'imperial'
-          - 'imperial_mb'
           - 'jax'
           - 'lugh'
+          - 'marvin'
           - 'maestro'
           - 'mpcdf'
           - 'munin'

@@ -100,6 +100,7 @@ Currently documentation is available for the following systems:
 * [CCGA_DX](docs/ccga_dx.md)
 * [CCGA_MED](docs/ccga_med.md)
 * [CFC](docs/cfc.md)
+* [CHEAHA](docs/cheaha.md)
 * [Computerome](docs/computerome.md)
 * [CRICK](docs/crick.md)
 * [CZBIOHUB_AWS](docs/czbiohub.md)
@@ -113,9 +114,11 @@ Currently documentation is available for the following systems:
 * [HASTA](docs/hasta.md)
 * [HEBBE](docs/hebbe.md)
 * [ICR_DAVROS](docs/icr_davros.md)
+* [IMPERIAL](docs/imperial.md)
 * [JAX](docs/jax.md)
 * [LUGH](docs/lugh.md)
 * [MAESTRO](docs/maestro.md)
+* [MARVIN](docs/marvin.md)
 * [MPCDF](docs/mpcdf.md)
 * [MUNIN](docs/munin.md)
 * [NU_GENOMICS](docs/nu_genomics.md)

@@ -7,11 +7,9 @@ params {

 process {
     executor = 'slurm'
-    queue = { task.memory <= 170.GB ? 'c' : 'm' }
+    queue = { task.memory <= 120.GB ? 'c' : 'm' }
-    module = 'anaconda3/2019.10'
+    module = ['build-env/.f2021', 'build-env/f2021', 'anaconda3/2021.11']
-    // --signal option will be handled by nextflow after 21.10.0 release (see https://github.com/nextflow-io/nextflow/issues/2163)
-    clusterOptions = { '--signal B:USR2 ' << ( (queue == 'c' & task.time <= 1.h) ? '--qos rapid' : ( task.time <= 8.h ? '--qos short': ( task.time <= 48.h ? '--qos medium' : '--qos long' ) ) ) }
+    clusterOptions = { ( task.queue == 'g' ? '--gres gpu:1 ' : '' ) << ( (task.queue == 'c' & task.time <= 1.h) ? '--qos rapid' : ( task.time <= 8.h ? '--qos short': ( task.time <= 48.h ? '--qos medium' : '--qos long' ) ) ) }
 }

 singularity {

conf/cheaha.config (new file): 24 lines

@@ -0,0 +1,24 @@
+params {
+    config_profile_name = 'cheaha'
+    config_profile_description = 'University of Alabama at Birmingham Cheaha HPC'
+    config_profile_contact = 'Lara Ianov (lianov@uab.edu) or Austyn Trull (atrull@uab.edu)'
+    config_profile_url = 'https://www.uab.edu/cores/ircp/bds'
+}
+
+singularity {
+    enabled = true
+    autoMounts = true
+}
+
+process {
+    executor = 'slurm'
+    queue = { task.memory <= 50.GB ? (task.time <= 2.h ? 'express' : task.time <= 12.h ? 'short' : task.time <= 50.h ? 'medium' : 'long') : (task.time <= 50.h ? 'largemem' : 'largemem-long')}
+    maxRetries = 3
+    beforeScript = 'module load Singularity'
+}
+
+params {
+    max_memory = 750.GB
+    max_cpus = 128
+    max_time = 150.h
+}

@@ -31,7 +31,7 @@ profiles {
             config_profile_description = 'MPI-EVA archgen profile, provided by nf-core/configs.'
             max_memory = 256.GB
             max_cpus = 32
-            max_time = 720.h
+            max_time = 365.d
             //Illumina iGenomes reference file path
         }

@@ -1,37 +1,80 @@
 //Profile config names for nf-core/configs

 params {
     // Config Params
     config_profile_description = 'Imperial College London - HPC Profile -- provided by nf-core/configs.'
     config_profile_contact = 'Combiz Khozoie (c.khozoie@imperial.ac.uk)'
     config_profile_url = 'https://www.imperial.ac.uk/admin-services/ict/self-service/research-support/rcs/'

     // Resources
-    max_memory = 256.GB
-    max_cpus = 32
-    max_time = 72.h
+    max_memory = 480.GB
+    max_cpus = 40
+    max_time = 1000.h
 }

+profiles {
+    imperial {
+        process {
+            executor = 'pbspro'
+
+            // Process-specific resource requirements
+            withLabel:process_low {
+                // TARGET QUEUE: throughput
+                cpus = { 2 * task.attempt }
+                memory = { 12.GB * task.attempt }
+                time = { 4.h * task.attempt }
+            }
+            withLabel:process_medium {
+                // TARGET QUEUE: throughput
+                cpus = 8
+                memory = { 32.GB * task.attempt }
+                time = { 8.h * task.attempt }
+            }
+            withLabel:process_high {
+                // TARGET QUEUE: general
+                cpus = 32
+                memory = { 62.GB * task.attempt }
+                time = { 16.h * task.attempt }
+            }
+            withLabel:process_long {
+                // TARGET QUEUE: long
+                cpus = 8
+                memory = 96.GB
+                time = { 72.h * task.attempt }
+            }
+            withLabel:process_high_memory {
+                // TARGET QUEUE: large memory
+                cpus = { 10 * task.attempt }
+                memory = { 120.GB * task.attempt }
+                time = { 12.h * task.attempt }
+            }
+        }
+    }
+    medbio {
+        process {
+            executor = 'pbspro'
+
+            queue = 'pqmedbio-tput'
+
+            //queue = 'med-bio' //!! this is an alias and shouldn't be used
+        }
+    }
+}

 executor {
     $pbspro {
         queueSize = 50
     }

     $local {
         cpus = 2
         queueSize = 1
-        memory = '32 GB'
+        memory = '6 GB'
     }
 }

 singularity {
     enabled = true
     autoMounts = true
     runOptions = "-B /rds/,/rds/general/user/$USER/ephemeral/tmp/:/tmp,/rds/general/user/$USER/ephemeral/tmp/:/var/tmp"
 }
-
-process {
-
-    executor = 'pbspro'
-
-}

@@ -1,44 +0,0 @@
-//Profile config names for nf-core/configs
-
-params {
-    // Config Params
-    config_profile_description = 'Imperial College London - MEDBIO QUEUE - HPC Profile -- provided by nf-core/configs.'
-    config_profile_contact = 'Combiz Khozoie (c.khozoie@imperial.ac.uk)'
-    config_profile_url = 'https://www.imperial.ac.uk/bioinformatics-data-science-group/resources/uk-med-bio/'
-
-    // Resources
-    max_memory = 640.GB
-    max_cpus = 32
-    max_time = 168.h
-}
-
-executor {
-    $pbspro {
-        queueSize = 50
-    }
-
-    $local {
-        cpus = 2
-        queueSize = 1
-        memory = '32 GB'
-    }
-}
-
-singularity {
-    enabled = true
-    autoMounts = true
-    runOptions = "-B /rds/,/rdsgpfs/,/rds/general/user/$USER/ephemeral/tmp/:/tmp,/rds/general/user/$USER/ephemeral/tmp/:/var/tmp"
-}
-
-process {
-
-    executor = 'pbspro'
-    queue = 'pqmedbio-tput'
-
-    //queue = 'med-bio' //!! this is an alias and shouldn't be used
-
-    withLabel:process_large {
-        queue = 'pqmedbio-large'
-    }
-
-}

conf/marvin.config (new file): 20 lines

@@ -0,0 +1,20 @@
+//Profile config names for nf-core/configs
+params {
+    config_profile_description = 'Config file for Marvin Cluster (UPF-CSIC), based on nf-core/configs'
+    config_profile_contact = 'pc.quilis@gmail.com (Pablo Carrion)'
+    config_profile_url = 'https://www.ibe.upf-csic.es'
+    max_memory = 256.GB
+    max_cpus = 32
+    max_time = 960.h
+}
+
+cleanup = false
+
+singularity {
+    enabled = true
+    autoMounts = true
+}
+
+process {
+    executor = 'slurm'
+}

@@ -18,6 +18,8 @@ profiles {

         executor {
             queueSize = 8
+            pollInterval = '1 min'
+            queueStatInterval = '5 min'
         }

         // Set $NXF_SINGULARITY_CACHEDIR in your ~/.bash_profile
@@ -46,6 +48,8 @@ profiles {

         executor {
             queueSize = 8
+            pollInterval = '1 min'
+            queueStatInterval = '5 min'
         }

         // Set $NXF_SINGULARITY_CACHEDIR in your ~/.bash_profile
@@ -58,7 +62,7 @@ profiles {
         params {
             config_profile_description = 'MPCDF raven profile (unofficially) provided by nf-core/configs.'
             max_memory = 368.GB
-            max_cpus = 192
+            max_cpus = 72
             max_time = 24.h
         }
     }

@@ -21,8 +21,9 @@ process {
 }

 singularity {
-    enabled = true
-    cacheDir = '/data1/containers/'
+    cacheDir = '/data1/containers/'
+    enabled = true
+    runOptions = "--bind /media/BTB_2021_01"
 }

 // To use docker, use nextflow run -profile munin,docker

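The unchanged comment in this hunk already points at the Docker alternative; spelled out as a command it would look like the line below, where the pipeline name is only an example and not part of this commit.

```bash
# Run with Docker instead of the Singularity cache configured above (pipeline name is an example)
nextflow run nf-core/sarek -profile munin,docker
```
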
@@ -19,57 +19,60 @@ process {
     withLabel:'sc_tiny'{
         cpus = { check_max( 1, 'cpus' ) }
         memory = { check_max( 1.GB * task.attempt, 'memory' ) }
-        time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
+        time = '365.d'
     }

     withLabel:'sc_small'{
         cpus = { check_max( 1, 'cpus' ) }
         memory = { check_max( 4.GB * task.attempt, 'memory' ) }
-        time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
+        time = '365.d'
     }

     withLabel:'sc_medium'{
         cpus = { check_max( 1, 'cpus' ) }
         memory = { check_max( 8.GB * task.attempt, 'memory' ) }
-        time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
+        time = '365.d'
     }

     withLabel:'mc_small'{
         cpus = { check_max( 2, 'cpus' ) }
         memory = { check_max( 4.GB * task.attempt, 'memory' ) }
-        time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
+        time = '365.d'
     }

     withLabel:'mc_medium' {
         cpus = { check_max( 4, 'cpus' ) }
         memory = { check_max( 8.GB * task.attempt, 'memory' ) }
-        time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
+        time = '365.d'
     }

     withLabel:'mc_large'{
         cpus = { check_max( 8, 'cpus' ) }
         memory = { check_max( 16.GB * task.attempt, 'memory' ) }
-        time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
+        time = '365.d'
     }

     withLabel:'mc_huge'{
         cpus = { check_max( 32, 'cpus' ) }
         memory = { check_max( 256.GB * task.attempt, 'memory' ) }
-        time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
+        time = '365.d'
     }

     // Fixes for SGE and Java incompatibility due to Java using more memory than you tell it to use

     withName: makeSeqDict {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }

     withName: fastqc {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }

     withName: adapter_removal {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }

     withName: samtools_flagstat {

@@ -82,10 +85,13 @@ process {

     withName: dedup {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }

     withName: markduplicates {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
+        memory = { check_max( 20.GB * task.attempt, 'memory' ) }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }

     withName: library_merge {

@@ -113,34 +119,43 @@ process {

     withName: maltextract {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }

     withName: multivcfanalyzer {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }

     withName: mtnucratio {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }

     withName: vcf2genome {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }

     withName: qualimap {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : task.exitStatus in [255] ? 'ignore' : 'finish' }
     }

     withName: damageprofiler {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
+        memory = { check_max( 8.GB * task.attempt, 'memory' ) }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }

     withName: circularmapper {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }

     withName: circulargenerator {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }

     withName: preseq {

@@ -150,6 +165,7 @@ process {

     withName: genotyping_ug {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }

 }

@@ -180,57 +196,53 @@ profiles {
             withLabel:'sc_tiny'{
                 cpus = { check_max( 1, 'cpus' ) }
                 memory = { check_max( 1.5.GB * task.attempt, 'memory' ) }
-                time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
             }

             withLabel:'sc_small'{
                 cpus = { check_max( 1, 'cpus' ) }
                 memory = { check_max( 6.GB * task.attempt, 'memory' ) }
-                time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
             }

             withLabel:'sc_medium'{
                 cpus = { check_max( 1, 'cpus' ) }
                 memory = { check_max( 12.GB * task.attempt, 'memory' ) }
-                time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
             }

             withLabel:'mc_small'{
                 cpus = { check_max( 2, 'cpus' ) }
                 memory = { check_max( 6.GB * task.attempt, 'memory' ) }
-                time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
             }

             withLabel:'mc_medium' {
                 cpus = { check_max( 4, 'cpus' ) }
                 memory = { check_max( 12.GB * task.attempt, 'memory' ) }
-                time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
             }

             withLabel:'mc_large'{
                 cpus = { check_max( 8, 'cpus' ) }
                 memory = { check_max( 24.GB * task.attempt, 'memory' ) }
-                time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
             }

             withLabel:'mc_huge'{
                 cpus = { check_max( 32, 'cpus' ) }
                 memory = { check_max( 256.GB * task.attempt, 'memory' ) }
-                time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
             }

             // Fixes for SGE and Java incompatibility due to (and also some samtools?!) using more memory than you tell it to use

             withName: makeSeqDict {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: fastqc {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: adapter_removal {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: samtools_flagstat {

@@ -243,10 +255,13 @@ profiles {

             withName: dedup {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: markduplicates {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
+                memory = { check_max( 32.GB * task.attempt, 'memory' ) }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: library_merge {

@@ -270,47 +285,57 @@ profiles {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=1000G" }
                 cpus = { check_max( 32, 'cpus' ) }
                 memory = { check_max( 955.GB * task.attempt, 'memory' ) }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName:hostremoval_input_fastq {
                 memory = { check_max( 32.GB * task.attempt, 'memory' ) }
-                time = 1440.h
             }

             withName: maltextract {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: multivcfanalyzer {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: mtnucratio {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: vcf2genome {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: qualimap {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : task.exitStatus in [255] ? 'ignore' : 'finish' }
             }

             withName: damageprofiler {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
+                memory = { check_max( 16.GB * task.attempt, 'memory' ) }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: circularmapper {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: circulargenerator {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: genotyping_ug {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: preseq {

@@ -345,57 +370,53 @@ profiles {
             withLabel:'sc_tiny'{
                 cpus = { check_max( 1, 'cpus' ) }
                 memory = { check_max( 2.GB * task.attempt, 'memory' ) }
-                time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
             }

             withLabel:'sc_small'{
                 cpus = { check_max( 1, 'cpus' ) }
                 memory = { check_max( 8.GB * task.attempt, 'memory' ) }
-                time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
             }

             withLabel:'sc_medium'{
                 cpus = { check_max( 1, 'cpus' ) }
                 memory = { check_max( 16.GB * task.attempt, 'memory' ) }
-                time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
             }

             withLabel:'mc_small'{
                 cpus = { check_max( 2, 'cpus' ) }
                 memory = { check_max( 8.GB * task.attempt, 'memory' ) }
-                time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
             }

             withLabel:'mc_medium' {
                 cpus = { check_max( 4, 'cpus' ) }
                 memory = { check_max( 16.GB * task.attempt, 'memory' ) }
-                time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
             }

             withLabel:'mc_large'{
                 cpus = { check_max( 8, 'cpus' ) }
                 memory = { check_max( 32.GB * task.attempt, 'memory' ) }
-                time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
             }

             withLabel:'mc_huge'{
                 cpus = { check_max( 32, 'cpus' ) }
                 memory = { check_max( 512.GB * task.attempt, 'memory' ) }
-                time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
             }

             // Fixes for SGE and Java incompatibility due to Java using more memory than you tell it to use

             withName: makeSeqDict {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: fastqc {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: adapter_removal {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: samtools_flagstat {

@@ -408,10 +429,13 @@ profiles {

             withName: dedup {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: markduplicates {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
+                memory = { check_max( 48.GB * task.attempt, 'memory' ) }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: library_merge {

@@ -430,7 +454,6 @@ profiles {

             withName:hostremoval_input_fastq {
                 memory = { check_max( 32.GB * task.attempt, 'memory' ) }
-                time = 1440.h
             }

             withName: metagenomic_complexity_filter {

@@ -441,42 +464,53 @@ profiles {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=1000G" }
                 cpus = { check_max( 32, 'cpus' ) }
                 memory = { check_max( 955.GB * task.attempt, 'memory' ) }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: maltextract {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: multivcfanalyzer {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: mtnucratio {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: vcf2genome {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: qualimap {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : task.exitStatus in [255] ? 'ignore' : 'finish' }
             }

             withName: damageprofiler {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
+                memory = { check_max( 32.GB * task.attempt, 'memory' ) }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: circularmapper {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: circulargenerator {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: genotyping_ug {
                 clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
+                errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
             }

             withName: preseq {

@@ -1,8 +1,7 @@
 process {

     withName:'PICARD_MARKDUPLICATES' {
-        cpus = { check_max( 13 * task.attempt, 'cpus' ) }
-        memory = { check_max( 130.GB * task.attempt, 'memory' ) }
+        memory = { check_max( 90.GB * task.attempt, 'memory' ) }
     }
     withName:'DEEPVARIANT' {
         cpus = { check_max( 16 * task.attempt, 'cpus' ) }

@@ -11,6 +11,7 @@ params {
     igenomes_ignore = true

     // Genome references
+    genome = 'GRCh38'
     fasta = '/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_genome.fa'
     fasta_fai = '/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_genome.fa.fai'
     gtf = '/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_annot.gtf'

@@ -3,7 +3,7 @@
 params {
     // Config Params
     config_profile_description = 'Imperial College London - HPC - nf-core/scFlow Profile -- provided by nf-core/configs.'
-    config_profile_contact = 'Combiz Khozoie (c.khozoie@imperial.ac.uk)'
+    config_profile_contact = 'NA'

     // Analysis Resource Params
     ctd_folder = "/rds/general/user/$USER/projects/ukdrmultiomicsproject/live/Analyses/scFlowResources/refs/ctd"
@@ -15,4 +15,4 @@ singularity {
     autoMounts = true
     cacheDir = "/rds/general/user/$USER/projects/ukdrmultiomicsproject/live/.singularity-cache"
     runOptions = "-B /rds/,/rdsgpfs/,/rds/general/user/$USER/ephemeral/tmp/:/tmp,/rds/general/user/$USER/ephemeral/tmp/:/var/tmp"
 }

@@ -51,6 +51,12 @@ params {
             primer_bed = 'https://github.com/artic-network/artic-ncov2019/raw/master/primer_schemes/nCoV-2019/V4/SARS-CoV-2.scheme.bed'
             scheme = 'SARS-CoV-2'
         }
+        '4.1' {
+            fasta = 'https://github.com/artic-network/artic-ncov2019/raw/master/primer_schemes/nCoV-2019/V4.1/SARS-CoV-2.reference.fasta'
+            gff = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/MN908947.3/GCA_009858895.3_ASM985889v3_genomic.200409.gff.gz'
+            primer_bed = 'https://github.com/artic-network/artic-ncov2019/raw/master/primer_schemes/nCoV-2019/V4.1/SARS-CoV-2.scheme.bed'
+            scheme = 'SARS-CoV-2'
+        }
         '1200' {
             fasta = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/MN908947.3/primer_schemes/artic/nCoV-2019/V1200/nCoV-2019.reference.fasta'
             gff = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/MN908947.3/GCA_009858895.3_ASM985889v3_genomic.200409.gff.gz'

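The new '4.1' block mirrors the existing '4' entry, so selecting it should only require pointing the pipeline at the new scheme version. A hedged sketch follows; the flag names follow nf-core/viralrecon's amplicon usage and should be checked against the pipeline release you actually run.

```bash
# Example only: verify flag names against your nf-core/viralrecon version
nextflow run nf-core/viralrecon \
    -profile singularity \
    --platform illumina \
    --protocol amplicon \
    --genome 'MN908947.3' \
    --primer_set artic \
    --primer_set_version '4.1' \
    --input samplesheet.csv \
    --outdir results
```
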
@@ -9,7 +9,7 @@ Before running the pipeline you will need to load Nextflow using the environment
 ```bash
 ## Load Nextflow environment module
 module purge
-module load nextflow/19.04.0
+module load nextflow/21.10.6
 ```

 A local copy of the [AWS-iGenomes](https://registry.opendata.aws/aws-igenomes/) resource has been made available on CBE so you should be able to run the pipeline against any reference available in the `igenomes.config` specific to the nf-core pipeline. You can do this by simply using the `--genome <GENOME_ID>` parameter.

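Putting the updated module instruction together with the iGenomes note, a run on CBE could look like the sketch below; the profile name, pipeline, genome and input names are illustrative assumptions, not part of this commit.

```bash
## Load the Nextflow module version referenced above
module purge
module load nextflow/21.10.6
# profile, pipeline, genome and inputs below are placeholders for illustration
nextflow run nf-core/rnaseq -profile cbe --genome GRCh38 --input samplesheet.csv --outdir results
```
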
docs/cheaha.md (new file): 32 lines

@@ -0,0 +1,32 @@
+# nf-core/configs: Cheaha (UAB HPC) Configuration
+
+All nf-core pipelines have been successfully configured for use on the Cheaha HPC cluster at [The University of Alabama at Birmingham](https://www.uab.edu/home/).
+
+To use, run the pipeline with `-profile cheaha`. This will download and launch the [`cheaha.config`](../conf/cheaha.config) which has been pre-configured with a setup suitable for the Cheaha HPC cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
+
+Before running the pipeline you will need to load Singularity and Nextflow using the environment module system on Cheaha. You can do this by issuing the commands below:
+
+```bash
+## Singularity environment modules
+module purge
+module load Singularity
+module load Nextflow
+```
+
+All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large, and all of the main output files will be saved in the `results/` directory anyway.
+
+>NB: You will need an account to use the HPC cluster on Cheaha in order to run the pipeline. If in doubt contact UAB IT Research Computing.
+
+>NB: Nextflow will need to submit the jobs via SLURM to the HPC cluster and as such the commands above will have to be executed on one of the login nodes (or alternatively in an interactive partition, but be aware of the time limit). If in doubt contact UAB IT Research Computing.
+
+>NB: Instead of using `module load Nextflow`, you may create a conda environment (e.g. `conda create -p $USER_DATA/nf-core_nextflow_env nf-core nextflow`) if you would like to have a more personalized environment of Nextflow (versions which may not be modules yet) and nf-core tools. This __requires__ you to instead do the following:
+
+```bash
+module purge
+module load Singularity
+module load Anaconda3
+# change the path/environment name according to what you created
+conda activate $USER_DATA/nf-core_nextflow_env
+```
+
+>NB: While the jobs for each process of the pipeline are sent to the appropriate nodes, the current session must remain active while the pipeline is running. We recommend using `screen` prior to loading any modules/environments. Once the pipeline starts you can detach the screen session by typing `Ctrl-a d` so you can safely log out of the HPC while keeping the pipeline active (and you may resume the screen session with `screen -r`). Other similar tools (e.g. `tmux`) may also be used.

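Putting the new Cheaha documentation together, a typical session could look like the sketch below; the pipeline, revision and input names are placeholders, not part of this commit.

```bash
## Keep the session alive with screen, load the modules, then launch with the new profile
screen -S nfcore
module purge
module load Singularity
module load Nextflow
# pipeline, revision and inputs are placeholders
nextflow run nf-core/rnaseq -r 3.5 -profile cheaha --input samplesheet.csv --outdir results
# detach with Ctrl-a d and reattach later with: screen -r nfcore
```
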
@@ -125,7 +125,7 @@ For Human and Mouse, we use [GENCODE](https://www.gencodegenes.org/) gene annotations

 ## High Priority Queue

-If you would like to run with the _High Priority_ queue, specify the `highpriority` config profile after `czbiohub_aws`. When applied after the main `czbiohub_aws` config, it overwrites the process `queue` identifier.
+If you would like to run with the *High Priority* queue, specify the `highpriority` config profile after `czbiohub_aws`. When applied after the main `czbiohub_aws` config, it overwrites the process `queue` identifier.

 To use it, submit your run with with `-profile czbiohub_aws,highpriority`.

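Since the later profile overrides the queue set by the earlier one, the two invocations differ only in the trailing profile; the pipeline name below is a placeholder.

```bash
# default queue
nextflow run nf-core/rnaseq -profile czbiohub_aws
# high-priority queue: 'highpriority' must come after 'czbiohub_aws' to override the process queue
nextflow run nf-core/rnaseq -profile czbiohub_aws,highpriority
```
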
@@ -2,15 +2,17 @@

 All nf-core pipelines have been successfully configured for use on the CX1 cluster at Imperial College London HPC.

-To use, run the pipeline with `-profile imperial`. This will download and launch the [`imperial.config`](../conf/imperial.config) which has been pre-configured with a setup suitable for the CX1 cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
+To use, run the pipeline with `-profile imperial,standard`. This will download and launch the [`imperial.config`](../conf/imperial.config) which has been pre-configured with a setup suitable for the CX1 cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.

 Before running the pipeline you will need to load Nextflow using the environment module system on the CX1 cluster. You can do this by issuing the commands below:

 ```bash
 ## Load Nextflow and Singularity environment modules
-module load Nextflow
+module load anaconda3/personal
+conda install -c bioconda nextflow
 ```

 >NB: You will need an account to use the HPC cluster CX1 in order to run the pipeline. If in doubt contact IT.
 >NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
->NB: To submit jobs to the Imperial College MEDBIO cluster, use `-profile imperial_mb` instead.
+>NB: To submit jobs to the Imperial College MEDBIO cluster, use `-profile imperial,medbio` instead.
+>NB: You will need a restricted access account to use the HPC cluster MEDBIO.

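Condensing the updated instructions for both Imperial targets into one sketch; the pipeline name and input files are placeholders, and the conda-based Nextflow install is the one documented above.

```bash
## One-time setup via conda, as documented above
module load anaconda3/personal
conda install -c bioconda nextflow

## CX1 queues
nextflow run nf-core/rnaseq -profile imperial,standard --input samplesheet.csv --outdir results
## MEDBIO queue (restricted-access account required)
nextflow run nf-core/rnaseq -profile imperial,medbio --input samplesheet.csv --outdir results
```
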
@@ -1,16 +0,0 @@
-# nf-core/configs: Imperial MEDBIO HPC Configuration
-
-All nf-core pipelines have been successfully configured for use on the MEDBIO cluster at Imperial College London HPC.
-
-To use, run the pipeline with `-profile imperial_mb`. This will download and launch the [`imperial_mb.config`](../conf/imperial_mb.config) which has been pre-configured with a setup suitable for the MEDBIO cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
-
-Before running the pipeline you will need to load Nextflow using the environment module system on the head node. You can do this by issuing the commands below:
-
-```bash
-## Load Nextflow and Singularity environment modules
-module load Nextflow
-```
-
->NB: You will need an account to use the HPC cluster MEDBIO in order to run the pipeline. Access to the MEDBIO queue is exclusive. If in doubt contact IT.
->NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
->NB: To submit jobs to the standard CX1 cluster at Imperial College, use `-profile imperial` instead.

docs/marvin.md (new file): 9 lines

@@ -0,0 +1,9 @@
+# nf-core/configs: Marvin Configuration
+
+All nf-core pipelines have been successfully configured for use on the [Marvin HPC cluster of the Universitat Pompeu Fabra (UPF)](https://www.upf.edu/web/sct-sit/marvin-cluster). To use, run the pipeline with `-profile marvin`.
+
+NB: You will need an account to use the HPC cluster on the Marvin cluster in order to run the pipeline. If in doubt contact IT. NB: Nextflow will need to submit the jobs via the SLURM scheduler to the HPC cluster and as such the commands above will have to be executed on one of the head nodes. If in doubt contact IT.
+
+## Pipeline Specific profiles
+
+There are no specific profiles added for now

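For the new Marvin profile the launch itself is the standard nf-core invocation, submitted from a head node so the SLURM jobs can be dispatched; the pipeline and input names below are placeholders.

```bash
# Submit from a head node; the marvin profile dispatches jobs through SLURM
nextflow run nf-core/rnaseq -profile marvin --input samplesheet.csv --outdir results
```
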
@@ -25,6 +25,7 @@ profiles {
     ccga_med { includeConfig "${params.custom_config_base}/conf/ccga_med.config" }
     cfc { includeConfig "${params.custom_config_base}/conf/cfc.config" }
     cfc_dev { includeConfig "${params.custom_config_base}/conf/cfc_dev.config" }
+    cheaha { includeConfig "${params.custom_config_base}/conf/cheaha.config" }
     computerome { includeConfig "${params.custom_config_base}/conf/computerome.config" }
     crick { includeConfig "${params.custom_config_base}/conf/crick.config" }
     czbiohub_aws { includeConfig "${params.custom_config_base}/conf/czbiohub_aws.config" }
@@ -41,10 +42,10 @@ profiles {
     icr_davros { includeConfig "${params.custom_config_base}/conf/icr_davros.config" }
     ifb_core { includeConfig "${params.custom_config_base}/conf/ifb_core.config" }
     imperial { includeConfig "${params.custom_config_base}/conf/imperial.config" }
-    imperial_mb { includeConfig "${params.custom_config_base}/conf/imperial_mb.config" }
     jax { includeConfig "${params.custom_config_base}/conf/jax.config" }
     lugh { includeConfig "${params.custom_config_base}/conf/lugh.config" }
     maestro { includeConfig "${params.custom_config_base}/conf/maestro.config" }
+    marvin { includeConfig "${params.custom_config_base}/conf/marvin.config" }
     mpcdf { includeConfig "${params.custom_config_base}/conf/mpcdf.config" }
     munin { includeConfig "${params.custom_config_base}/conf/munin.config" }
     nihbiowulf { includeConfig "${params.custom_config_base}/conf/nihbiowulf.config" }

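With the two include lines added here, the new site profiles resolve automatically for any nf-core pipeline that pulls the shared institutional configs, for example (the pipeline name is a placeholder):

```bash
nextflow run nf-core/rnaseq -profile cheaha
nextflow run nf-core/rnaseq -profile marvin
```
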
@@ -10,5 +10,4 @@

 profiles {
     imperial { includeConfig "${params.custom_config_base}/conf/pipeline/scflow/imperial.config" }
-    imperial_mb { includeConfig "${params.custom_config_base}/conf/pipeline/scflow/imperial.config" } // intended
 }
