
Added the VSC UGent profile

Nicolas Vannieuwkerke 2022-05-20 09:12:18 +02:00
parent be9711731a
commit cc86e6b270
2 changed files with 150 additions and 0 deletions

conf/vsc_ugent.config (new file, 110 lines added)

@@ -0,0 +1,110 @@
// Specify the work directory
workDir = "$VSC_SCRATCH_VO_USER/work"
// Perform work directory cleanup when the run has successfully completed
cleanup = true
// Limit the job submission rate to about 10 per second so the scheduler is not flooded with jobs
executor {
    submitRateLimit = '10 sec'
}
// Specify that singularity should be used and where the cache dir will be for the images
singularity {
    enabled = true
    autoMounts = true
    cacheDir = "$VSC_SCRATCH_VO_USER/singularity"
}
// Define profiles for each cluster
profiles {
    skitty {
        params {
            config_profile_description = 'HPC_SKITTY profile for use on the Skitty cluster of the VSC HPC.'
            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
            config_profile_url = 'https://www.ugent.be/hpc/en'
            max_memory = 177.GB
            max_cpus = 36
            max_time = 72.h
        }
        process {
            executor = 'slurm'
            queue = 'skitty'
            maxRetries = 2
            scratch = "$VSC_SCRATCH_VO_USER"
        }
    }
    swalot {
        params {
            config_profile_description = 'HPC_SWALOT profile for use on the Swalot cluster of the VSC HPC.'
            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
            config_profile_url = 'https://www.ugent.be/hpc/en'
            max_memory = 116.GB
            max_cpus = 20
            max_time = 72.h
        }
        process {
            executor = 'slurm'
            queue = 'swalot'
            maxRetries = 2
            scratch = "$VSC_SCRATCH_VO_USER"
        }
    }
    victini {
        params {
            config_profile_description = 'HPC_VICTINI profile for use on the Victini cluster of the VSC HPC.'
            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
            config_profile_url = 'https://www.ugent.be/hpc/en'
            max_memory = 88.GB
            max_cpus = 36
            max_time = 72.h
        }
        process {
            executor = 'slurm'
            queue = 'victini'
            maxRetries = 2
            scratch = "$VSC_SCRATCH_VO_USER"
        }
    }
    kirlia {
        params {
            config_profile_description = 'HPC_KIRLIA profile for use on the Kirlia cluster of the VSC HPC.'
            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
            config_profile_url = 'https://www.ugent.be/hpc/en'
            max_memory = 738.GB
            max_cpus = 36
            max_time = 72.h
        }
        process {
            executor = 'slurm'
            queue = 'kirlia'
            maxRetries = 2
            scratch = "$VSC_SCRATCH_VO_USER"
        }
    }
    doduo {
        params {
            config_profile_description = 'HPC_DODUO profile for use on the Doduo cluster of the VSC HPC.'
            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
            config_profile_url = 'https://www.ugent.be/hpc/en'
            max_memory = 250.GB
            max_cpus = 96
            max_time = 72.h
        }
        process {
            executor = 'slurm'
            queue = 'doduo'
            maxRetries = 2
            scratch = "$VSC_SCRATCH_VO_USER"
        }
    }
}

docs/vsc_ugent.md (new file, 40 lines added)

@@ -0,0 +1,40 @@
# nf-core/configs: University of Ghent High Performance Computing Infrastructure (VSC)
> **NB:** You will need an [account](https://www.ugent.be/hpc/en/access/faq/access) to use the HPC cluster to run the pipeline.
First, switch to the cluster you want to run the pipeline on. You can check which clusters currently have the most free resources at this [link](https://shieldon.ugent.be:8083/pbsmon-web-users/). Use the following commands to switch between clusters:
```shell
module purge
module swap cluster/<CLUSTER>
```
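For example, to move to the `doduo` cluster (one of the supported clusters listed at the bottom of this page) and verify which cluster module is loaded, you could run:
```shell
# Example only: swap to the doduo cluster and list the loaded modules
module swap cluster/doduo
module list
```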
Before running the pipeline you will need to create a SLURM/PBS script to submit as a job.
```bash
#!/bin/bash
module load Nextflow
nextflow run <pipeline> -profile vsc_ugent,<CLUSTER> <Add your other parameters>
```
I also highly recommend setting a Singularity cache directory by exporting the `$SINGULARITY_CACHEDIR` bash environment variable in your `.bash_profile` or `.bashrc`, or by adding it to your SLURM/PBS script. If this cache directory is not specified, it defaults to `$HOME/.singularity`, which does not have a lot of disk space.
```shell
export SINGULARITY_CACHEDIR=$VSC_SCRATCH_VO_USER/.singularity
```
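If you want this setting to persist across sessions, one option (assuming a bash login shell) is to append the export to your `.bashrc`:
```shell
# Make the Singularity cache directory setting persistent for new shells
echo 'export SINGULARITY_CACHEDIR=$VSC_SCRATCH_VO_USER/.singularity' >> ~/.bashrc
```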
All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large, and all of the main output files will be saved in the `results/` directory anyway.
The config sets the `cleanup` option, which removes the `work/` directory automatically once the pipeline has completed successfully. If the run does not complete successfully, the `work/` directory should be removed manually to save storage space.
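As a sketch of that manual cleanup, assuming you are in the directory from which the run was launched, you could either delete the directory directly or let Nextflow remove the cached runs it knows about:
```shell
# Remove the work directory of an unsuccessful run by hand
rm -rf work/
# Or let Nextflow remove the cached work directories of previous runs
nextflow clean -f
```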
You can also add several TORQUE options to the SLURM/PBS script; see this [link](http://hpcugent.github.io/vsc_user_docs/pdf/intro-HPC-linux-gent.pdf#appendix.B) for more details.
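As an illustration only (the job name and resource values below are placeholders, not requirements of this profile), such options are added as `#PBS` directives at the top of the job script:
```bash
#!/bin/bash
#PBS -N nf-core-run         # job name (placeholder)
#PBS -l walltime=72:00:00   # wall time; 72 h matches the profile's max_time
#PBS -l mem=16gb            # requested memory (placeholder)
module load Nextflow
nextflow run <pipeline> -profile vsc_ugent,<CLUSTER> <Add your other parameters>
```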
Submit your job to the cluster with the following command:
```shell
qsub <script name>.pbs
```
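After submitting, you can for example check the state of your jobs with the standard PBS command:
```shell
# Show the status of your queued and running jobs
qstat
```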
> **NB:** The profile only works for the clusters `skitty`, `swalot`, `victini`, `kirlia` and `doduo`.