From cc86e6b2704a6891b6f0f6ccba1bbbee10945f03 Mon Sep 17 00:00:00 2001
From: Nicolas Vannieuwkerke
Date: Fri, 20 May 2022 09:12:18 +0200
Subject: [PATCH 01/12] Added the VSC UGent profile

---
 conf/vsc_ugent.config | 110 ++++++++++++++++++++++++++++++++++++++++++
 docs/vsc_ugent.md     |  40 +++++++++++++++
 2 files changed, 150 insertions(+)
 create mode 100644 conf/vsc_ugent.config
 create mode 100644 docs/vsc_ugent.md

diff --git a/conf/vsc_ugent.config b/conf/vsc_ugent.config
new file mode 100644
index 0000000..7df41f9
--- /dev/null
+++ b/conf/vsc_ugent.config
@@ -0,0 +1,110 @@
+// Specify the work directory
+workDir = "$VSC_SCRATCH_VO_USER/work"
+
+// Perform work directory cleanup when the run has successfully completed
+cleanup = true
+
+// Reduce the job submission rate to about 10 per second so the server isn't bombarded with jobs
+executor {
+    submitRateLimit = '10 sec'
+}
+
+// Specify that Singularity should be used and where the cache dir will be for the images
+singularity {
+    enabled = true
+    autoMounts = true
+    cacheDir = "$VSC_SCRATCH_VO_USER/singularity"
+}
+
+// Define profiles for each cluster
+profiles {
+    skitty {
+        params {
+            config_profile_description = 'HPC_SKITTY profile for use on the Skitty cluster of the VSC HPC.'
+            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
+            config_profile_url = 'https://www.ugent.be/hpc/en'
+            max_memory = 177.GB
+            max_cpus = 36
+            max_time = 72.h
+        }
+
+        process {
+            executor = 'slurm'
+            queue = 'skitty'
+            maxRetries = 2
+            scratch = "$VSC_SCRATCH_VO_USER"
+        }
+    }
+
+    swalot {
+        params {
+            config_profile_description = 'HPC_SWALOT profile for use on the Swalot cluster of the VSC HPC.'
+            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
+            config_profile_url = 'https://www.ugent.be/hpc/en'
+            max_memory = 116.GB
+            max_cpus = 20
+            max_time = 72.h
+        }
+
+        process {
+            executor = 'slurm'
+            queue = 'swalot'
+            maxRetries = 2
+            scratch = "$VSC_SCRATCH_VO_USER"
+        }
+    }
+
+    victini {
+        params {
+            config_profile_description = 'HPC_VICTINI profile for use on the Victini cluster of the VSC HPC.'
+            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
+            config_profile_url = 'https://www.ugent.be/hpc/en'
+            max_memory = 88.GB
+            max_cpus = 36
+            max_time = 72.h
+        }
+
+        process {
+            executor = 'slurm'
+            queue = 'victini'
+            maxRetries = 2
+            scratch = "$VSC_SCRATCH_VO_USER"
+        }
+    }
+
+    kirlia {
+        params {
+            config_profile_description = 'HPC_KIRLIA profile for use on the Kirlia cluster of the VSC HPC.'
+            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
+            config_profile_url = 'https://www.ugent.be/hpc/en'
+            max_memory = 738.GB
+            max_cpus = 36
+            max_time = 72.h
+        }
+
+        process {
+            executor = 'slurm'
+            queue = 'kirlia'
+            maxRetries = 2
+            scratch = "$VSC_SCRATCH_VO_USER"
+        }
+    }
+
+    doduo {
+        params {
+            config_profile_description = 'HPC_DODUO profile for use on the Doduo cluster of the VSC HPC.'
+            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
+            config_profile_url = 'https://www.ugent.be/hpc/en'
+            max_memory = 250.GB
+            max_cpus = 96
+            max_time = 72.h
+        }
+
+        process {
+            executor = 'slurm'
+            queue = 'doduo'
+            maxRetries = 2
+            scratch = "$VSC_SCRATCH_VO_USER"
+        }
+    }
+}
\ No newline at end of file
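For context, the per-cluster profiles defined above are selected on the command line together with the institutional profile, as documented in the markdown file below. A minimal usage sketch (assuming `nf-core/rnaseq` as the pipeline and `doduo` as the cluster, both purely illustrative):

```bash
# Sketch: combine the institutional profile with one of the cluster profiles
# defined in conf/vsc_ugent.config (skitty, swalot, victini, kirlia or doduo).
# nf-core/rnaseq is only an example pipeline; substitute your own.
nextflow run nf-core/rnaseq -profile vsc_ugent,doduo
```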
diff --git a/docs/vsc_ugent.md b/docs/vsc_ugent.md
new file mode 100644
index 0000000..d0bc230
--- /dev/null
+++ b/docs/vsc_ugent.md
@@ -0,0 +1,40 @@
+# nf-core/configs: University of Ghent High Performance Computing Infrastructure (VSC)
+
+> **NB:** You will need an [account](https://www.ugent.be/hpc/en/access/faq/access) to use the HPC cluster to run the pipeline.
+
+First you should go to the cluster you want to run the pipeline on. You can check which clusters have the most free space on this [link](https://shieldon.ugent.be:8083/pbsmon-web-users/). Use the following commands to easily switch between clusters:
+
+```shell
+module purge
+module swap cluster/<cluster>
+```
+
+Before running the pipeline, you will need to create a SLURM/PBS script to submit as a job.
+
+```bash
+#!/bin/bash
+
+module load Nextflow
+
+nextflow run <pipeline> -profile vsc_ugent,<cluster>
+```
+
+I also highly recommend specifying a Singularity cache directory, either by setting the `$SINGULARITY_CACHEDIR` environment variable in your `.bash_profile` or `.bashrc`, or by adding it to your SLURM/PBS script. If no cache directory is specified, it defaults to `$HOME/.singularity`, which does not have a lot of disk space.
+
+```shell
+export SINGULARITY_CACHEDIR=$VSC_SCRATCH_VO_USER/.singularity
+```
+
+All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large, and all of the main output files will be saved in the `results/` directory anyway.
+The config contains a `cleanup` command that removes the `work/` directory automatically once the pipeline has completed successfully. If the run does not complete successfully then the `work/` dir should be removed manually to save storage space.
+
+You can also add several TORQUE options to the SLURM/PBS script. More about these options can be found at this [link](http://hpcugent.github.io/vsc_user_docs/pdf/intro-HPC-linux-gent.pdf#appendix.B).
+
+Submit your job to the cluster with the following command:
+
+```shell
+qsub <script name>.pbs
+```
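Putting the pieces together, a complete submission script might look like the sketch below. The TORQUE directives, pipeline name, and cluster profile are illustrative assumptions and should be adapted to your own run:

```bash
#!/bin/bash
# Illustrative TORQUE directives (assumed values; tune them for your job.
# Appendix B of the HPC-UGent PDF linked above lists the available options.)
#PBS -N nf-core-run
#PBS -l walltime=72:00:00

module load Nextflow

# Keep the Singularity image cache on scratch, as recommended above
export SINGULARITY_CACHEDIR=$VSC_SCRATCH_VO_USER/.singularity

# nf-core/rnaseq and doduo are placeholders for your pipeline and cluster
nextflow run nf-core/rnaseq -profile vsc_ugent,doduo
```

Saved as, for example, `run_pipeline.pbs`, this script can then be submitted with `qsub run_pipeline.pbs` from the matching cluster.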