1
0
Fork 0
mirror of https://github.com/MillironX/nf-configs.git synced 2024-11-21 16:16:04 +00:00

Merge pull request #279 from pierrespc/master

Added Configuration files for running in Maestro HPC at Pasteur Institute, Paris
This commit is contained in:
James A. Fellows Yates 2021-10-08 19:15:47 +02:00 committed by GitHub
commit 73b6b74c69
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
8 changed files with 218 additions and 0 deletions

View file

@ -49,6 +49,7 @@ jobs:
- 'imperial_mb'
- 'jax'
- 'lugh'
- 'maestro'
- 'mpcdf'
- 'munin'
- 'nu_genomics'

View file

@ -118,6 +118,7 @@ Currently documentation is available for the following systems:
* [ICR_DAVROS](docs/icr_davros.md)
* [JAX](docs/jax.md)
* [LUGH](docs/lugh.md)
* [MAESTRO](docs/maestro.md)
* [MPCDF](docs/mpcdf.md)
* [MUNIN](docs/munin.md)
* [NU_GENOMICS](docs/nu_genomics.md)

49
conf/maestro.config Normal file
View file

@ -0,0 +1,49 @@
// nf-core/configs institutional profile for the Institut Pasteur "Maestro" HPC cluster.
params {
config_profile_description = 'Institut Pasteur Maestro cluster profile'
config_profile_url = 'https://research.pasteur.fr/en/equipment/maestro-compute-cluster/'
config_profile_contact = 'Pierre Luisi (@pierrespc)'
}
// Containers run through Singularity; /pasteur is bind-mounted so cluster storage
// stays visible inside the container, and $HOME is remapped to /home/$USER.
singularity {
enabled = true
autoMounts = true
runOptions = '--home $HOME:/home/$USER --bind /pasteur'
}
// Two SLURM submission profiles that differ only in QoS and resource caps:
// 'normal' (24 h wall-clock, up to 96 CPUs) and 'long' (8760 h = 1 year, 5 CPUs).
profiles {
normal {
process {
executor = 'slurm'
scratch = false
queue = 'common'
clusterOptions = '--qos=normal'
}
params {
igenomes_ignore = true
// NOTE(review): igenomesIgnore appears to be the deprecated camelCase alias
// kept alongside igenomes_ignore for older pipeline releases — confirm before removing.
igenomesIgnore = true
max_memory = 400.GB
max_cpus = 96
max_time = 24.h
}
}
long {
process {
executor = 'slurm'
scratch = false
queue = 'common'
clusterOptions = '--qos=long'
}
params {
igenomes_ignore = true
igenomesIgnore = true
max_memory = 400.GB
max_cpus = 5
// 8760 h = 365 days: the wall-clock limit of the 'long' QoS.
max_time = 8760.h
}
}
}

View file

@ -0,0 +1,116 @@
/*
 * -------------------------------------------------
 * Nextflow config file for running nf-core/eager on whole genome data or mitogenomes
 * -------------------------------------------------
 * Usage: nextflow run nf-core/eager -profile maestro,<qos>,<genome>
 * where <qos> is 'long' or 'normal' and <genome> is 'nuclear', 'mitocondrial' or 'unlimitedtime'
 */
params {
config_profile_name = 'nf-core/eager nuclear/mitocondrial - human profiles'
config_profile_description = "Simple profiles for assessing computational ressources that fit human nuclear dna, human mitogenomes processing. unlimitedtime is also available "
}
profiles {
// Resource profile for human NUCLEAR genome data: generous CPU/memory,
// sized so each step fits within the 24 h wall-clock limit of the 'normal' QoS.
nuclear {
process {
// On failure, resubmit with resources scaled by task.attempt, at most 2 retries.
errorStrategy = 'retry'
maxRetries = 2
withName:'makeBWAIndex'{
cpus = { check_max( 8 * task.attempt, 'cpus' ) }
memory = { check_max( 8.GB * task.attempt, 'memory' ) }
time = { check_max( 12.h * task.attempt, 'time' ) }
}
withName:'adapter_removal'{
cpus = { check_max( 8 * task.attempt, 'cpus' ) }
memory = { check_max( 16.GB * task.attempt, 'memory' ) }
time = { check_max( 12.h * task.attempt, 'time' ) }
}
withName:'bwa'{
cpus = { check_max( 40 * task.attempt, 'cpus' ) }
memory = { check_max( 40.GB * task.attempt, 'memory' ) }
// Fixed (non-scaling) cap: the 'normal' QoS allows at most 24 h.
time = 24.h
// 'deep' cache: resume keys on input file content rather than path/timestamp.
cache = 'deep'
}
withName:'markduplicates'{
// Retry only on exit codes typical of resource kills (e.g. 137/143 = SIGKILL/SIGTERM);
// any other failure lets running tasks finish, then stops the run.
errorStrategy = { task.exitStatus in [143,137,104,134,139] ? 'retry' : 'finish' }
cpus = { check_max( 16 * task.attempt, 'cpus' ) }
memory = { check_max( 16.GB * task.attempt, 'memory' ) }
time = { check_max( 12.h * task.attempt, 'time' ) }
}
withName:'damageprofiler'{
cpus = 1
memory = { check_max( 8.GB * task.attempt, 'memory' ) }
time = { check_max( 6.h * task.attempt, 'time' ) }
}
withName:'fastp'{
cpus = 8
memory = { check_max( 8.GB * task.attempt, 'memory' ) }
time = { check_max( 6.h * task.attempt, 'time' ) }
}
withName:'fastqc'{
cpus = 2
memory = { check_max( 8.GB * task.attempt, 'memory' ) }
time = { check_max( 6.h * task.attempt, 'time' ) }
}
}
}
// Resource profile for human MITOCHONDRIAL (mitogenome) data: the small reference
// keeps CPU/memory requirements modest compared to the 'nuclear' profile.
// NOTE(review): the public profile name is spelled 'mitocondrial' (no 'h');
// user-facing docs must use this exact spelling or the profile will not resolve.
mitocondrial {
process {
// On failure, resubmit with resources scaled by task.attempt, at most 2 retries.
errorStrategy = 'retry'
maxRetries = 2
withName:'makeBWAIndex'{
cpus = { check_max( 8 * task.attempt, 'cpus' ) }
memory = { check_max( 8.GB * task.attempt, 'memory' ) }
time = { check_max( 12.h * task.attempt, 'time' ) }
}
withName:'adapter_removal'{
cpus = { check_max( 8 * task.attempt, 'cpus' ) }
memory = { check_max( 16.GB * task.attempt, 'memory' ) }
time = { check_max( 12.h * task.attempt, 'time' ) }
}
withName:'bwa'{
cpus = { check_max( 5 * task.attempt, 'cpus' ) }
memory = { check_max( 5.GB * task.attempt, 'memory' ) }
// Fixed (non-scaling) cap: the 'normal' QoS allows at most 24 h.
time = 24.h
}
withName:'markduplicates'{
// Retry only on exit codes typical of resource kills (e.g. 137/143 = SIGKILL/SIGTERM);
// any other failure lets running tasks finish, then stops the run.
errorStrategy = { task.exitStatus in [143,137,104,134,139] ? 'retry' : 'finish' }
cpus = { check_max( 5 * task.attempt, 'cpus' ) }
memory = { check_max( 5.GB * task.attempt, 'memory' ) }
time = { check_max( 6.h * task.attempt, 'time' ) }
}
withName:'damageprofiler'{
cpus = 1
memory = { check_max( 5.GB * task.attempt, 'memory' ) }
time = { check_max( 3.h * task.attempt, 'time' ) }
}
withName:'fastp'{
cpus = 8
memory = { check_max( 5.GB * task.attempt, 'memory' ) }
time = { check_max( 3.h * task.attempt, 'time' ) }
}
withName:'fastqc'{
cpus = 2
memory = { check_max( 8.GB * task.attempt, 'memory' ) }
time = { check_max( 6.h * task.attempt, 'time' ) }
}
}
}
// Fallback profile for processes that cannot finish within the other profiles'
// time caps: a one-year wall-clock limit, but only 5 CPUs and no retries.
unlimitedtime {
process {
// Do not retry; let already-running tasks finish before stopping on error.
errorStrategy = 'finish'
cpus = 5
memory = 200.GB
// 8760 h = 365 days.
time = 8760.h
}
}

19
docs/maestro.md Normal file
View file

@ -0,0 +1,19 @@
# nf-core/configs Maestro (at Pasteur Institute, Paris) Configuration
To use, run the pipeline with `-profile maestro,<qos>` (with qos being long or normal). This will download and launch the maestro.config which has been pre-configured with a setup suitable for the Maestro cluster on either the long or normal qos.
Using one of these profiles, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline
## Required modules
Please first load the Java, Nextflow and Singularity modules:
`module load java`
`module load nextflow`
`module load singularity`
Also, do not forget to run Nextflow inside a persistent terminal session such as `tmux` (or similar), so the run survives SSH disconnections.
## Other profiles at Pasteur
If you are using the TARS cluster, please use the pasteur profile instead; see [docs/pasteur.md](pasteur.md) for installation and usage instructions.

View file

@ -0,0 +1,30 @@
# nf-core/configs maestro eager specific configuration
Extra configuration for the nf-core/eager pipeline, targeting human DNA data processing.
## Usage
To use, run the pipeline with `-profile maestro,<qos>,<type>`, where `<qos>` can be `normal` or `long` and `<type>` can be `nuclear` or `mitocondrial` (note: the profile name is spelled without the "h").
This will download and launch the eager specific [`maestro.config`](../../../conf/pipeline/eager/maestro.config) which has been pre-configured with a setup suitable for the Maestro cluster.
Example: `nextflow run nf-core/eager -profile maestro,normal,nuclear`
## eager specific configurations for maestro
Specific configurations for Maestro have been made for eager.
We decided not to provide any tool parameters here and to focus the profile only on resource management: the Maestro profiles run with default nf-core/eager parameters, but with modifications concerning time (limited to 24 h in the normal QoS, compensated by increased memory and CPUs, especially for alignments).
## nuclear
Increases the number of CPUs and the amount of memory for key processes
## mitocondrial
More limited computational resources
## unlimitedtime
Every process has a one-year time limit. Use it only when some processes cannot be completed within the time limits of the `mitocondrial` or `nuclear` profiles.
Expect slow processes when using this profile because only 5 CPUs are available at a time.

View file

@ -43,6 +43,7 @@ profiles {
imperial_mb { includeConfig "${params.custom_config_base}/conf/imperial_mb.config" }
jax { includeConfig "${params.custom_config_base}/conf/jax.config" }
lugh { includeConfig "${params.custom_config_base}/conf/lugh.config" }
maestro { includeConfig "${params.custom_config_base}/conf/maestro.config" }
mpcdf { includeConfig "${params.custom_config_base}/conf/mpcdf.config" }
munin { includeConfig "${params.custom_config_base}/conf/munin.config" }
nu_genomics { includeConfig "${params.custom_config_base}/conf/nu_genomics.config" }

View file

@ -11,4 +11,5 @@
profiles {
mpcdf { includeConfig "${params.custom_config_base}/conf/pipeline/eager/mpcdf.config" }
eva { includeConfig "${params.custom_config_base}/conf/pipeline/eager/eva.config" }
maestro { includeConfig "${params.custom_config_base}/conf/pipeline/eager/maestro.config" }
}