Merge pull request #178 from jfy133/master

Add Max Planck Computing and Data Facility Cobra/Raven Clusters

Commit 804f53c58c. 7 changed files with 166 additions and 2 deletions.
.github/workflows/main.yml (vendored, 2 changes)

@@ -16,7 +16,7 @@ jobs:
     needs: test_all_profiles
     strategy:
       matrix:
-        profile: ['awsbatch', 'bi','bigpurple', 'binac', 'cbe', 'ccga_dx', 'ccga_med', 'cfc', 'cfc_dev', 'crick', 'denbi_qbic', 'ebc', 'genotoul', 'genouest', 'gis', 'google', 'hebbe', 'icr_davros', 'kraken', 'munin', 'pasteur', 'phoenix', 'prince', 'shh', 'uct_hpc', 'uppmax', 'utd_ganymede', 'uzh']
+        profile: ['awsbatch', 'bi','bigpurple', 'binac', 'cbe', 'ccga_dx', 'ccga_med', 'cfc', 'cfc_dev', 'crick', 'denbi_qbic', 'ebc', 'genotoul', 'genouest', 'gis', 'google', 'hebbe', 'icr_davros', 'kraken', 'mpcdf', 'munin', 'pasteur', 'phoenix', 'prince', 'shh', 'uct_hpc', 'uppmax', 'utd_ganymede', 'uzh']
     steps:
       - uses: actions/checkout@v1
       - name: Install Nextflow
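The only change here registers the new profile in the CI test matrix. To reproduce that check locally from a clone of this repository, one would run something like the following (a sketch: `configtest.nf` and the `--custom_config_base` parameter are assumed from the repository's standard test setup and are not part of this diff):

```bash
# Run the same smoke test CI performs for the new matrix entry
# (assumed test script and parameter; adjust to the repository layout)
nextflow run ./configtest.nf --custom_config_base . -profile mpcdf
```
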

README.md

@@ -113,6 +113,7 @@ Currently documentation is available for the following systems:
 * [HEBBE](docs/hebbe.md)
 * [ICR_DAVROS](docs/icr_davros.md)
 * [KRAKEN](docs/kraken.md)
+* [MPCDF](docs/mpcdf.md)
 * [MUNIN](docs/munin.md)
 * [PASTEUR](docs/pasteur.md)
 * [PHOENIX](docs/phoenix.md)
conf/mpcdf.config (new file, 51 lines)

params {
    config_profile_description = 'MPCDF HPC profiles (unofficially) provided by nf-core/configs.'
    config_profile_contact = 'James Fellows Yates (@jfy133)'
    config_profile_url = 'https://www.mpcdf.mpg.de/services/computing'
}

profiles {

    cobra {
        // Does not have Singularity! The Conda module must be used, but it is
        // recommended to set the NXF_CONDA_CACHEDIR variable in ~/.bash{_profile,rc}
        // to create a common cache dir.

        process {
            beforeScript = 'module load anaconda/3/2020.02'
            executor = 'slurm'
        }

        executor {
            queueSize = 8
        }

        params {
            config_profile_description = 'MPCDF cobra profile (unofficially) provided by nf-core/configs.'
            max_memory = 725.GB
            max_cpus = 80
            max_time = 24.h
        }
    }

    raven {
        // Does not have Singularity! The Conda module must be used, but it is
        // recommended to set the NXF_CONDA_CACHEDIR variable in ~/.bash{_profile,rc}
        // to create a common cache dir.

        process {
            beforeScript = 'module load anaconda/3/2020.02'
            executor = 'slurm'
        }

        executor {
            queueSize = 8
        }

        params {
            config_profile_description = 'MPCDF raven profile (unofficially) provided by nf-core/configs.'
            max_memory = 368.GB
            max_cpus = 192
            max_time = 24.h
        }
    }
}
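For reference, a cluster is selected by combining its inner profile with the enclosing mpcdf profile on the command line, as the docs added below describe (a sketch; nf-core/eager stands in for any nf-core pipeline):

```bash
# Cobra: tasks capped at 80 CPUs / 725 GB / 24 h, at most 8 queued jobs
nextflow run nf-core/eager -profile cobra,mpcdf

# Raven: tasks capped at 192 CPUs / 368 GB / 24 h, at most 8 queued jobs
nextflow run nf-core/eager -profile raven,mpcdf
```
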
conf/pipeline/eager/mpcdf.config (new file, 64 lines)

// Profile config names for nf-core/configs

params {
    // Specific nf-core/configs params
    config_profile_contact = 'James Fellows Yates (@jfy133)'
    config_profile_description = 'nf-core/eager MPCDF profile provided by nf-core/configs'
}

profiles {
    cobra {
        // Specific nf-core/eager process configuration
        process {

            withName: malt {
                maxRetries = 1
                memory = 725.GB
                cpus = 40
                time = 24.h
            }

            withLabel: 'sc_tiny' {
                cpus = { check_max( 1, 'cpus' ) }
                memory = { check_max( 1.GB * task.attempt, 'memory' ) }
                time = 24.h
            }

            withLabel: 'sc_small' {
                cpus = { check_max( 1, 'cpus' ) }
                memory = { check_max( 4.GB * task.attempt, 'memory' ) }
                time = 24.h
            }

            withLabel: 'sc_medium' {
                cpus = { check_max( 1, 'cpus' ) }
                memory = { check_max( 8.GB * task.attempt, 'memory' ) }
                time = 24.h
            }

            withLabel: 'mc_small' {
                cpus = { check_max( 2 * task.attempt, 'cpus' ) }
                memory = { check_max( 4.GB * task.attempt, 'memory' ) }
                time = 24.h
            }

            withLabel: 'mc_medium' {
                cpus = { check_max( 4 * task.attempt, 'cpus' ) }
                memory = { check_max( 8.GB * task.attempt, 'memory' ) }
                time = 24.h
            }

            withLabel: 'mc_large' {
                cpus = { check_max( 8 * task.attempt, 'cpus' ) }
                memory = { check_max( 16.GB * task.attempt, 'memory' ) }
                time = 24.h
            }

            withLabel: 'mc_huge' {
                cpus = { check_max( 32 * task.attempt, 'cpus' ) }
                memory = { check_max( 256.GB * task.attempt, 'memory' ) }
                time = 24.h
            }
        }
    }
}
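The `check_max` calls above are the standard nf-core helper that clamps each request to the profile's max_memory/max_cpus/max_time caps, with `task.attempt` growing the request on retries. To verify that these eager-specific overrides are actually applied, the resolved configuration can be printed without launching a run (`nextflow config` is a stock Nextflow command; the revision flag is omitted here):

```bash
# Print the merged configuration nf-core/eager would run with on Cobra
nextflow config nf-core/eager -profile cobra,mpcdf
```
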
docs/mpcdf.md (new file, 45 lines)

# nf-core/configs: MPCDF Configuration

All nf-core pipelines have been successfully configured for use on the HPCs at the [Max Planck Computing and Data Facility](https://www.mpcdf.mpg.de/).

> :warning: These profiles are not officially supported by the MPCDF.

To run Nextflow, the `jdk` module must be loaded. To use the nf-core profile(s), run the pipeline with `-profile <cluster>,mpcdf`.

Currently the following clusters are supported: cobra, raven.

> NB: Nextflow needs to submit jobs to the clusters via SLURM, so the command above must be executed on one of the head nodes. Check the [MPCDF documentation](https://www.mpcdf.mpg.de/services/computing).

## cobra

Cobra does not currently support Singularity, so the Anaconda module is loaded for each process.

Because of this, we also recommend setting `$NXF_CONDA_CACHEDIR` to a location of your choice in which to store all built environments, to prevent Nextflow from rebuilding them on every run.

To use: `-profile cobra,mpcdf`

Sets the following parameters:

- Maximum parallel running jobs: 8
- Max. memory: 725.GB
- Max. CPUs: 80
- Max. walltime: 24.h

## draco

:hammer_and_wrench: under testing.

## raven

Raven does not currently support Singularity, so `module load anaconda/3/2020.02` is run for each process.

Because of this, we also recommend setting `$NXF_CONDA_CACHEDIR` to a location of your choice in which to store all built environments, to prevent Nextflow from rebuilding them on every run.

To use: `-profile raven,mpcdf`

Sets the following parameters:

- Maximum parallel running jobs: 8
- Max. memory: 368.GB
- Max. CPUs: 192
- Max. walltime: 24.h
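Putting the instructions above together, a first run on Cobra would look roughly like this (a sketch: the cache path and the choice of nf-core/eager are placeholders, not mandated by this commit):

```bash
# Make the nextflow command available on the head node
module load jdk

# Shared Conda cache so environments are built once and reused
# (placeholder path; use any location with sufficient quota)
export NXF_CONDA_CACHEDIR="$HOME/nxf_conda_cache"

# Cluster profile first, then mpcdf, as documented above
nextflow run nf-core/eager -profile cobra,mpcdf
```
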

nfcore_custom.config

@@ -30,6 +30,7 @@ profiles {
     gis { includeConfig "${params.custom_config_base}/conf/gis.config" }
     hebbe { includeConfig "${params.custom_config_base}/conf/hebbe.config" }
     kraken { includeConfig "${params.custom_config_base}/conf/kraken.config" }
+    mpcdf { includeConfig "${params.custom_config_base}/conf/mpcdf.config" }
     munin { includeConfig "${params.custom_config_base}/conf/munin.config" }
     pasteur { includeConfig "${params.custom_config_base}/conf/pasteur.config" }
     phoenix { includeConfig "${params.custom_config_base}/conf/phoenix.config" }
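This registry is what pipelines load remotely at run time, so an unmerged profile can be exercised by pointing `custom_config_base` at a local clone instead (a sketch; the clone path is a placeholder, and `custom_config_base` is the same parameter this file already references):

```bash
# Test the new profile from a local checkout before it reaches master
# (placeholder path to a clone of this repository)
nextflow run nf-core/eager -profile cobra,mpcdf \
    --custom_config_base /path/to/local/nf-core-configs
```
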

pipeline/eager.config

@@ -10,4 +10,6 @@
 profiles {
   shh { includeConfig "${params.custom_config_base}/conf/pipeline/eager/shh.config" }
+  mpcdf { includeConfig "${params.custom_config_base}/conf/pipeline/eager/mpcdf.config" }
+
 }