From 3da1373c147101f16b5bb1306f78d3456ab8c1e3 Mon Sep 17 00:00:00 2001
From: "James A. Fellows Yates"
Date: Thu, 14 Mar 2019 21:16:02 +0100
Subject: [PATCH 1/3] Updated path to centralised singularity cache

---
 conf/shh.config | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/conf/shh.config b/conf/shh.config
index 4088afd..3b58d23 100644
--- a/conf/shh.config
+++ b/conf/shh.config
@@ -7,7 +7,7 @@ params {
 
 singularity {
   enabled = true
-  cacheDir = "/projects1/users/$USER/nextflow/nf_cache/singularity/"
+  cacheDir = "/projects1/singularity_scratch/cache/"
 }
 
 process {

From e503a3a0649267d5d3b368ba65d91808987e53f3 Mon Sep 17 00:00:00 2001
From: "James A. Fellows Yates"
Date: Thu, 14 Mar 2019 21:21:31 +0100
Subject: [PATCH 2/3] Added new cluster and singularity cache

---
 docs/shh.md | 20 ++++++++------------
 1 file changed, 8 insertions(+), 12 deletions(-)

diff --git a/docs/shh.md b/docs/shh.md
index bcdda3c..67cf3b9 100644
--- a/docs/shh.md
+++ b/docs/shh.md
@@ -1,21 +1,17 @@
 # nf-core/configs: SHH Configuration
 
-All nf-core pipelines have been successfully configured for use on the Department of Archaeogenetic's SDAG cluster at the [Max Planck Institute for the Science of Human History (MPI-SHH)](http://shh.mpg.de).
+All nf-core pipelines have been successfully configured for use on the Department of Archaeogenetic's SDAG/CDAG clusters at the [Max Planck Institute for the Science of Human History (MPI-SHH)](http://shh.mpg.de).
 
-To use, run the pipeline with `-profile shh`. This will download and launch the [`shh.config`](../conf/shh.config) which has been pre-configured with a setup suitable for the SDAG cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
-
-Note that the configuration file is currently optimised for `nf-core/eager`. It
-will submit to the medium queue but with a walltime of 48 hours.
-
-## Preparation
-Before running the pipeline you will need to create a the following folder in your `/projects1/users/` directory. This will be used to store the singularity software images, which will take up too much space for your home directory.
-
-This should be named as follows, replacing `<username>` with your username:
+To use, run the pipeline with `-profile shh`. This will download and launch the [`shh.config`](../conf/shh.config) which has been pre-configured with a setup suitable for the SDAG and CDAG clusters. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline. The image will currently be centrally stored here:
 
 ```bash
-"/projects1/users/<username>/nextflow/nf_cache/singularity/"
+/projects1/singularity_scratch/cache/
 ```
 
+however this will likely change to a read-only directory in the future that will be managed by IT.
+
+Note that **the configuration file is currently optimised for `nf-core/eager`**. It will submit to the short queue but with a walltime of 2 hours.
+
 >NB: You will need an account and VPN access to use the cluster at MPI-SHH in order to run the pipeline. If in doubt contact IT.
 
->NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the lhead nodes. If in doubt contact IT.
+>NB: Nextflow will need to submit the jobs via SLURM to the clusters and as such the commands above will have to be executed on one of the head nodes. If in doubt contact IT.

From ecb36dd8c23ab72d1f507237eba0740efe02f136 Mon Sep 17 00:00:00 2001
From: "James A. Fellows Yates"
Date: Thu, 14 Mar 2019 21:26:43 +0100
Subject: [PATCH 3/3] Reduced memory and CPUs for cdag compatibility

Also matches 'fair use' policy
---
 conf/shh.config | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/conf/shh.config b/conf/shh.config
index 3b58d23..43d192f 100644
--- a/conf/shh.config
+++ b/conf/shh.config
@@ -16,7 +16,7 @@ process {
 }
 
 params {
-  max_memory = 734.GB
-  max_cpus = 64
+  max_memory = 256.GB
+  max_cpus = 32
   max_time = 2.h
 }
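For context on how the updated profile is expected to be used, below is a minimal launch sketch based on the revised `docs/shh.md` above. Only `-profile shh` and the central cache path come from these patches; the release tag and the input parameters are hypothetical placeholders for whatever the chosen pipeline version actually expects.

```bash
# Run from one of the SDAG/CDAG head nodes; Nextflow then submits the jobs to SLURM.
# The shh profile enables Singularity and reuses the central image cache at
# /projects1/singularity_scratch/cache/ set in conf/shh.config.
# NOTE: the release tag and the --reads/--fasta values below are placeholders.
nextflow run nf-core/eager \
  -r 2.0.6 \
  -profile shh \
  --reads '/path/to/data/*_R{1,2}.fastq.gz' \
  --fasta '/path/to/reference/genome.fasta'
```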
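As for the third patch: the reduced `max_memory`/`max_cpus` values act as hard upper limits on what any single job may request, in line with the 'fair use' policy mentioned in the commit message. Assuming the standard nf-core `--max_memory`/`--max_cpus`/`--max_time` parameters (not something these patches introduce), a run can tighten the caps further on the command line, for example:

```bash
# Illustrative only: request even less than the profile defaults
# (256 GB memory, 32 CPUs, 2 h walltime) for a small test run.
nextflow run nf-core/eager \
  -profile shh \
  --max_memory '64.GB' \
  --max_cpus 8 \
  --max_time '1.h'
```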