Split shh config for each cluster

parent 89506a653c
commit 6f40c13473
3 changed files with 36 additions and 9 deletions
conf/shh_cdag.config (new file, 30 lines)
@@ -0,0 +1,30 @@
+//Profile config names for nf-core/configs
+params {
+  config_profile_description = 'MPI-SHH CDAG cluster profile provided by nf-core/configs.'
+  config_profile_contact = 'James Fellows Yates (@jfy133), Maxime Borry (@Maxibor)'
+  config_profile_url = 'https://shh.mpg.de'
+}
+
+singularity {
+  enabled = true
+  autoMounts = true
+  runOptions = '-B /run/shm:/run/shm'
+  cacheDir = "/projects1/singularity_scratch/cache/"
+}
+
+process {
+  executor = 'slurm'
+  queue = { task.time <= 2.h ? 'short' : task.time <= 48.h ? 'medium': 'long' }
+}
+
+executor {
+  queueSize = 16
+}
+
+params {
+  max_memory = 256.GB
+  max_cpus = 32
+  max_time = 720.h
+  //Illumina iGenomes reference file path
+  igenomes_base = "/projects1/public_data/igenomes/"
+}
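The CDAG profile above routes each task to a SLURM partition based on its requested walltime, and the `max_*` params cap what nf-core pipelines may request on this cluster. As a usage sketch only (the pipeline name and the lowered caps are hypothetical, not part of this commit), a launch from a head node could look like:

```bash
# Run an nf-core pipeline with the new CDAG profile; the standard nf-core
# --max_memory/--max_cpus flags can lower the caps below the profile defaults.
nextflow run nf-core/eager -profile shh_cdag --max_memory '128.GB' --max_cpus 16
```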
conf/shh_sdag.config
@@ -1,6 +1,6 @@
 //Profile config names for nf-core/configs
 params {
-  config_profile_description = 'MPI SHH cluster profile provided by nf-core/configs.'
+  config_profile_description = 'MPI-SHH SDAG cluster profile provided by nf-core/configs.'
   config_profile_contact = 'James Fellows Yates (@jfy133), Maxime Borry (@Maxibor)'
   config_profile_url = 'https://shh.mpg.de'
 }
@@ -14,7 +14,7 @@ singularity {
 
 process {
   executor = 'slurm'
-  queue = { task.memory > 756.GB ? 'supercruncher': task.time <= 2.h ? 'short' : task.time <= 48.h ? 'medium': 'long' }
+  queue = { task.memory > 756.GB || task.cpus > 64 ? 'supercruncher': task.time <= 2.h ? 'short' : task.time <= 48.h ? 'medium': 'long' }
 }
 
 executor {
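The SDAG change adds a CPU threshold to the existing memory check, so oversized requests route to `supercruncher` before the time-based tiers apply. A minimal sketch of the resulting selection logic in bash (function name and test values are illustrative, not from the repo):

```bash
#!/usr/bin/env bash
# Mirrors the SDAG queue closure: memory/CPU override first, then time tiers.
pick_queue_sdag() {
  local mem_gb=$1 cpus=$2 hours=$3
  if (( mem_gb > 756 || cpus > 64 )); then
    echo supercruncher
  elif (( hours <= 2 )); then
    echo short
  elif (( hours <= 48 )); then
    echo medium
  else
    echo long
  fi
}
pick_queue_sdag 800 32 1   # -> supercruncher (memory bound)
pick_queue_sdag 128 72 1   # -> supercruncher (CPU bound)
pick_queue_sdag 64 8 24    # -> medium
```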
docs/shh.md (11 changed lines)
@@ -2,18 +2,15 @@
 
 All nf-core pipelines have been successfully configured for use on the Department of Archaeogenetics' SDAG/CDAG clusters at the [Max Planck Institute for the Science of Human History (MPI-SHH)](http://shh.mpg.de).
 
-To use, run the pipeline with `-profile shh`. This will download and launch the [`shh.config`](../conf/shh.config) which has been pre-configured with a setup suitable for the SDAG and CDAG clusters. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline. The image will currently be centrally stored here:
+To use, run the pipeline with either `-profile shh_sdag` or `-profile shh_cdag`. This will download and launch the corresponding [`shh_sdag.config`](../conf/shh_sdag.config) or [`shh_cdag.config`](../conf/shh_cdag.config), which have been pre-configured with a setup suitable for the SDAG and CDAG clusters respectively. Using these profiles, a Docker image containing all of the required software will be downloaded and converted to a Singularity image before execution of the pipeline. The image will currently be centrally stored here:
 
 ```bash
 /projects1/singularity_scratch/cache/
 ```
 
-however this will likely change to a read-only directory in the future that will be managed by IT.
+however this will likely change to a read-only directory in the future that will be managed by the IT team.
 
-This configuration will automatically choose the correct SLURM queue (`short`,`medium`,`long`,`supercruncher`) depending on the time and memory required by each process.
+This configuration will automatically choose the correct SLURM queue (`short`, `medium`, `long`) depending on the time and memory required by each process. `shh_sdag` additionally allows submission of jobs to the `supercruncher` queue when a job requests more than 756 GB of memory or more than 64 CPUs.
 
-Please note that there is no `supercruncher` queue on CDAG.
-
->NB: You will need an account and VPN access to use the cluster at MPI-SHH in order to run the pipeline. If in doubt contact IT.
+>NB: You will need an account and VPN access to use the cluster at MPI-SHH in order to run the pipeline. If in doubt contact the IT team.
 >NB: Nextflow will need to submit the jobs via SLURM to the clusters and as such the commands above will have to be executed on one of the head nodes. If in doubt contact IT.
->NB: The maximum CPUs/Mem are currently adapted for SDAG resource maximums - i.e. will exceed CDAG. Be careful when running larges jobs that error-retries may exceed limits and get 'stuck' in SLURM.
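Once a pipeline is running, the queue routing on either cluster can be verified with standard SLURM tooling; a hedged example using only standard `squeue` format flags, nothing cluster-specific:

```bash
# List your jobs with the partition (queue) each one was routed to.
squeue --user "$USER" --format "%.10i %.12P %.30j %.8T"
```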