From 3f534c154ea30152983865d10e6d8d13f68c8cf8 Mon Sep 17 00:00:00 2001 From: arontommi Date: Wed, 6 Nov 2019 13:10:09 +0100 Subject: [PATCH 01/46] documentation on how to fix bianca cluster configuration problem --- docs/uppmax.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/uppmax.md b/docs/uppmax.md index 07a7f14..d5bba37 100644 --- a/docs/uppmax.md +++ b/docs/uppmax.md @@ -26,6 +26,12 @@ If running on Bianca, you will have no internet connection and these configs wil Please use the nf-core helper tool on a different system to download the required pipeline files, and transfer them to bianca. This helper tool bundles the config files in this repo together with the pipeline files, so the profile will still be available. +Please note that Bianca only allocates 7 GB memory per core so max memory needs to be configured: + +```bash +--max_memory "112GB" +``` + ## Getting more memory If your nf-core pipeline run is running out of memory, you can run on a fat node with more memory using the following nextflow flags: From d70fc2605ac1dafb916d1999c95c46c9f89738a9 Mon Sep 17 00:00:00 2001 From: Andrew Frank Date: Thu, 12 Nov 2020 11:52:58 -0500 Subject: [PATCH 02/46] Fix parameters in google.md Change parameters from hyphens to underscores to accurately reflect google.config --- docs/google.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/google.md b/docs/google.md index 9e8c521..d488b8a 100644 --- a/docs/google.md +++ b/docs/google.md @@ -14,21 +14,21 @@ nextflow run nf-core/rnaseq -profile test,google --google_bucket Date: Tue, 17 Nov 2020 19:45:06 +0100 Subject: [PATCH 03/46] Move sdag queue specification block in sdag profile --- conf/shh.config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conf/shh.config b/conf/shh.config index 02c186b..7f2fb8c 100644 --- a/conf/shh.config +++ b/conf/shh.config @@ -20,7 +20,6 @@ singularity { process { executor = 'slurm' - queue = { task.memory > 756.GB || task.cpus 
> 64 ? 'supercruncher': task.time <= 2.h ? 'short' : task.time <= 48.h ? 'medium': 'long' } } executor { @@ -42,6 +41,7 @@ profiles { config_profile_description = 'SDAG MPI-SHH profile, provided by nf-core/configs.' max_memory = 2.TB max_cpus = 128 + queue = { task.memory > 756.GB || task.cpus > 64 ? 'supercruncher': task.time <= 2.h ? 'short' : task.time <= 48.h ? 'medium': 'long' } } } } From 71d1ff2226d7393d65f81cb9eec569d39291e7af Mon Sep 17 00:00:00 2001 From: "Thiseas C. Lamnidis" Date: Wed, 18 Nov 2020 13:04:46 +0100 Subject: [PATCH 04/46] Add params block to sdag queue Co-authored-by: James A. Fellows Yates --- conf/shh.config | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/conf/shh.config b/conf/shh.config index 7f2fb8c..dc51e87 100644 --- a/conf/shh.config +++ b/conf/shh.config @@ -38,10 +38,12 @@ profiles { } sdag { params { - config_profile_description = 'SDAG MPI-SHH profile, provided by nf-core/configs.' - max_memory = 2.TB - max_cpus = 128 - queue = { task.memory > 756.GB || task.cpus > 64 ? 'supercruncher': task.time <= 2.h ? 'short' : task.time <= 48.h ? 'medium': 'long' } + config_profile_description = 'SDAG MPI-SHH profile, provided by nf-core/configs.' + max_memory = 2.TB + max_cpus = 128 + } + process { + queue = { task.memory > 756.GB || task.cpus > 64 ? 'supercruncher': task.time <= 2.h ? 'short' : task.time <= 48.h ? 
'medium': 'long' } } } } From 2ea06ffab834d82bd2e9a05dd46c9b7863879bd1 Mon Sep 17 00:00:00 2001 From: Emelie Nilsso Date: Mon, 23 Nov 2020 21:20:36 +0100 Subject: [PATCH 05/46] Modified uppmax and ampliseq specific config to fit with the divided preparation of the database --- conf/pipeline/ampliseq/uppmax.config | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/conf/pipeline/ampliseq/uppmax.config b/conf/pipeline/ampliseq/uppmax.config index 36a1c3b..c594798 100644 --- a/conf/pipeline/ampliseq/uppmax.config +++ b/conf/pipeline/ampliseq/uppmax.config @@ -1,15 +1,27 @@ // Profile config names for nf-core/configs - +System.err.println("reading from the extra uppmax config, e.g. Daniels") params { // Specific nf-core/configs params config_profile_contact = 'Daniel Lundin (daniel.lundin@lnu.se)' config_profile_description = 'nf-core/ampliseq UPPMAX profile provided by nf-core/configs' } -withName: make_SILVA_132_16S_classifier { - clusterOptions = { "-A $params.project -C fat -p node -N 1 ${params.clusterOptions ?: ''}" } -} +process { + //"make_classifier" is the older version (just one step) of preparing the database for classification + withName: make_classifier { + clusterOptions = { "-A $params.project -C fat -p node -N 1 -t 7-00:00:00 ${params.clusterOptions ?: ''}" } + } -withName: classifier { - clusterOptions = { "-A $params.project -C fat -p node -N 1 ${params.clusterOptions ?: ''}" } + //"clfr_extract_seq" and "clfr_train" is the newer version, where database preparation is split in two to optimise resources + withName: clfr_extract_seq { + clusterOptions = { "-A $params.project -p core -n 1 -t 7-00:00:00 ${params.clusterOptions ?: ''}" } + } + + withName: clfr_train { + clusterOptions = { "-A $params.project -C fat -p node -N 1 -t 24:00:00 ${params.clusterOptions ?: ''}" } + } + + withName: classifier { + clusterOptions = { "-A $params.project -C fat -p node -N 1 ${params.clusterOptions ?: ''}" } + } } From 
37e7cfae2c92cf87c2e8e0828ad1e07e093ebe0e Mon Sep 17 00:00:00 2001 From: Emelie Nilsso Date: Tue, 24 Nov 2020 07:14:23 +0100 Subject: [PATCH 06/46] Removed unnecessary code that was used to troubleshoot --- conf/pipeline/ampliseq/uppmax.config | 1 - 1 file changed, 1 deletion(-) diff --git a/conf/pipeline/ampliseq/uppmax.config b/conf/pipeline/ampliseq/uppmax.config index c594798..9f618fb 100644 --- a/conf/pipeline/ampliseq/uppmax.config +++ b/conf/pipeline/ampliseq/uppmax.config @@ -1,5 +1,4 @@ // Profile config names for nf-core/configs -System.err.println("reading from the extra uppmax config, e.g. Daniels") params { // Specific nf-core/configs params config_profile_contact = 'Daniel Lundin (daniel.lundin@lnu.se)' From d61ed01d2dde116b0ea0e068fac210f2d80c1a3f Mon Sep 17 00:00:00 2001 From: Emelie Nilsso Date: Wed, 25 Nov 2020 17:50:30 +0100 Subject: [PATCH 07/46] Updated process names according to ampliseq main.nf --- conf/pipeline/ampliseq/uppmax.config | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/conf/pipeline/ampliseq/uppmax.config b/conf/pipeline/ampliseq/uppmax.config index 9f618fb..b6cfea2 100644 --- a/conf/pipeline/ampliseq/uppmax.config +++ b/conf/pipeline/ampliseq/uppmax.config @@ -11,12 +11,12 @@ process { clusterOptions = { "-A $params.project -C fat -p node -N 1 -t 7-00:00:00 ${params.clusterOptions ?: ''}" } } - //"clfr_extract_seq" and "clfr_train" is the newer version, where database preparation is split in two to optimise resources - withName: clfr_extract_seq { + //"classifier_extract_seq" and "classifier_train" is the newer version, where database preparation is split in two to optimise resources + withName: classifier_extract_seq { clusterOptions = { "-A $params.project -p core -n 1 -t 7-00:00:00 ${params.clusterOptions ?: ''}" } } - withName: clfr_train { + withName: classifier_train { clusterOptions = { "-A $params.project -C fat -p node -N 1 -t 24:00:00 ${params.clusterOptions ?: ''}" } } From 
d9a800966213d301dd4f2c402169d4db0faf40e7 Mon Sep 17 00:00:00 2001 From: Phil Ewels Date: Wed, 25 Nov 2020 22:45:16 +0100 Subject: [PATCH 08/46] Better UPPMAX docs --- docs/uppmax.md | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/docs/uppmax.md b/docs/uppmax.md index 0f23cd1..8551067 100644 --- a/docs/uppmax.md +++ b/docs/uppmax.md @@ -2,6 +2,10 @@ All nf-core pipelines have been successfully configured for use on the Swedish UPPMAX clusters. +## Getting help + +We have a Slack channel dedicated to UPPMAX users on the nf-core Slack: [https://nfcore.slack.com/channels/uppmax](https://nfcore.slack.com/channels/uppmax) + ## Using the UPPMAX config profile To use, run the pipeline with `-profile uppmax` (one hyphen). @@ -12,14 +16,19 @@ In addition to this config profile, you will also need to specify an UPPMAX proj You can do this with the `--project` flag (two hyphens) when launching nextflow. For example: ```bash -nextflow run nf-core/PIPELINE -profile uppmax --project SNIC 2018/1-234 # ..rest of pipeline flags +nextflow run nf-core/PIPELINE -profile uppmax --project "snic2018-1-234" # ..rest of pipeline flags ``` +> NB: If you're not sure what your UPPMAX project ID is, try running `groups` or checking SUPR. + Before running the pipeline you will need to either install Nextflow or load it using the environment module system. -This config enables Nextflow to manage the pipeline jobs via the Slurm job scheduler. +This config enables Nextflow to manage the pipeline jobs via the Slurm job scheduler and using Singularity for software management. + Just run Nextflow on a login node and it will handle everything else. +Remember to use `-bg` to launch Nextflow in the background, so that the pipeline doesn't exit if you leave your terminal session. 
+ ## Using iGenomes references A local copy of the iGenomes resource has been made available on all UPPMAX clusters so you should be able to run the pipeline against any reference available in the `igenomes.config`. @@ -40,7 +49,7 @@ Note that each job will still start with the same request as normal, but restart All jobs will be submitted to fat nodes using this method, so it's only for use in extreme circumstances. -## How to specify a UPPMAX cluster +## Different UPPMAX clusters The UPPMAX nf-core configuration profile uses the `hostname` of the active environment to automatically apply the following resource limits: From e96455e1809cdb0221fbbec502b933b8664978c1 Mon Sep 17 00:00:00 2001 From: Phil Ewels Date: Thu, 26 Nov 2020 01:43:09 +0100 Subject: [PATCH 09/46] Update docs/uppmax.md --- docs/uppmax.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/uppmax.md b/docs/uppmax.md index 8551067..dcd6ca0 100644 --- a/docs/uppmax.md +++ b/docs/uppmax.md @@ -16,7 +16,7 @@ In addition to this config profile, you will also need to specify an UPPMAX proj You can do this with the `--project` flag (two hyphens) when launching nextflow. For example: ```bash -nextflow run nf-core/PIPELINE -profile uppmax --project "snic2018-1-234" # ..rest of pipeline flags +nextflow run nf-core/PIPELINE -profile uppmax --project snic2018-1-234 # ..rest of pipeline flags ``` > NB: If you're not sure what your UPPMAX project ID is, try running `groups` or checking SUPR. 
From 06469901fe1aac666878353cfc848fd5b5e54f4e Mon Sep 17 00:00:00 2001 From: arontommi Date: Thu, 26 Nov 2020 11:24:29 +0100 Subject: [PATCH 10/46] typo --- docs/uppmax.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/uppmax.md b/docs/uppmax.md index e7d5327..170d618 100644 --- a/docs/uppmax.md +++ b/docs/uppmax.md @@ -82,7 +82,7 @@ For security reasons, there is no internet access on Bianca so you can't downloa You can follow the guide for downloading pipelines [for offline use](https://nf-co.re/tools#downloading-pipelines-for-offline-use) Note that you will have to download the singularity images as well. -After transfering the pipeline and the singularity images to your project. Before running the pipeline you will have to indicate to nextflow where the singularity images are located by setting `NXF_SINGULARITY_CACHEDIR` : +After transffering the pipeline and the singularity images to your project. Before running the pipeline you will have to indicate to nextflow where the singularity images are located by setting `NXF_SINGULARITY_CACHEDIR` : `export NXF_SINGULARITY_CACHEDIR=Your_Location_For_The_Singularity_directory/.` From 5726ef3456210e4244aeb01a5eed776dbddadf89 Mon Sep 17 00:00:00 2001 From: arontommi Date: Thu, 26 Nov 2020 11:26:08 +0100 Subject: [PATCH 11/46] typo --- docs/uppmax.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/uppmax.md b/docs/uppmax.md index 170d618..1823408 100644 --- a/docs/uppmax.md +++ b/docs/uppmax.md @@ -79,7 +79,7 @@ To use it, submit with `-profile uppmax,devel`. For security reasons, there is no internet access on Bianca so you can't download from or upload files to the cluster directly. Before running a nf-core pipeline on bianca you will first have to download the pipeline and singularity images needed elsewhere and transfer them via the wharf area to your bianca project. 
-You can follow the guide for downloading pipelines [for offline use](https://nf-co.re/tools#downloading-pipelines-for-offline-use) Note that you will have to download the singularity images as well. +You can follow the guide for downloading pipelines [for offline use](https://nf-co.re/tools#downloading-pipelines-for-offline-use). Note that you will have to download the singularity images as well. After transffering the pipeline and the singularity images to your project. Before running the pipeline you will have to indicate to nextflow where the singularity images are located by setting `NXF_SINGULARITY_CACHEDIR` : From e6bb7a48521da14b2ca783e9af56fc655aa4a670 Mon Sep 17 00:00:00 2001 From: arontommi Date: Thu, 26 Nov 2020 11:37:59 +0100 Subject: [PATCH 12/46] capital names --- docs/uppmax.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/uppmax.md b/docs/uppmax.md index 1823408..24dcf20 100644 --- a/docs/uppmax.md +++ b/docs/uppmax.md @@ -77,7 +77,7 @@ To use it, submit with `-profile uppmax,devel`. ## Running on Bianca -For security reasons, there is no internet access on Bianca so you can't download from or upload files to the cluster directly. Before running a nf-core pipeline on bianca you will first have to download the pipeline and singularity images needed elsewhere and transfer them via the wharf area to your bianca project. +For security reasons, there is no internet access on Bianca so you can't download from or upload files to the cluster directly. Before running a nf-core pipeline on Bianca you will first have to download the pipeline and singularity images needed elsewhere and transfer them via the wharf area to your Bianca project. You can follow the guide for downloading pipelines [for offline use](https://nf-co.re/tools#downloading-pipelines-for-offline-use). Note that you will have to download the singularity images as well. 
From f84fbb8f890d87ef25b3197ca5e4bdcf8adae4cd Mon Sep 17 00:00:00 2001 From: arontommi Date: Thu, 26 Nov 2020 11:39:51 +0100 Subject: [PATCH 13/46] typo --- docs/uppmax.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/uppmax.md b/docs/uppmax.md index 24dcf20..680feab 100644 --- a/docs/uppmax.md +++ b/docs/uppmax.md @@ -82,7 +82,7 @@ For security reasons, there is no internet access on Bianca so you can't downloa You can follow the guide for downloading pipelines [for offline use](https://nf-co.re/tools#downloading-pipelines-for-offline-use). Note that you will have to download the singularity images as well. -After transffering the pipeline and the singularity images to your project. Before running the pipeline you will have to indicate to nextflow where the singularity images are located by setting `NXF_SINGULARITY_CACHEDIR` : +After transferring the pipeline and the singularity images to your project. Before running the pipeline you will have to indicate to nextflow where the singularity images are located by setting `NXF_SINGULARITY_CACHEDIR` : `export NXF_SINGULARITY_CACHEDIR=Your_Location_For_The_Singularity_directory/.` From 6480aef1db757943b09608212fcad01f16687a47 Mon Sep 17 00:00:00 2001 From: arontommi Date: Thu, 26 Nov 2020 11:41:33 +0100 Subject: [PATCH 14/46] typo --- docs/uppmax.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/uppmax.md b/docs/uppmax.md index 680feab..f4e66cb 100644 --- a/docs/uppmax.md +++ b/docs/uppmax.md @@ -82,7 +82,7 @@ For security reasons, there is no internet access on Bianca so you can't downloa You can follow the guide for downloading pipelines [for offline use](https://nf-co.re/tools#downloading-pipelines-for-offline-use). Note that you will have to download the singularity images as well. -After transferring the pipeline and the singularity images to your project. 
Before running the pipeline you will have to indicate to nextflow where the singularity images are located by setting `NXF_SINGULARITY_CACHEDIR` : +Next transfer the pipeline and the singularity images to your project. Before running the pipeline you will have to indicate to nextflow where the singularity images are located by setting `NXF_SINGULARITY_CACHEDIR` : `export NXF_SINGULARITY_CACHEDIR=Your_Location_For_The_Singularity_directory/.` From febf38dd8ecaba8d0000643f37d7db024d4ce969 Mon Sep 17 00:00:00 2001 From: Emelie Nilsso Date: Thu, 26 Nov 2020 13:38:47 +0100 Subject: [PATCH 15/46] Removed an old process so that only relevant processes are included --- conf/pipeline/ampliseq/uppmax.config | 6 ------ 1 file changed, 6 deletions(-) diff --git a/conf/pipeline/ampliseq/uppmax.config b/conf/pipeline/ampliseq/uppmax.config index b6cfea2..2a8bc34 100644 --- a/conf/pipeline/ampliseq/uppmax.config +++ b/conf/pipeline/ampliseq/uppmax.config @@ -6,12 +6,6 @@ params { } process { - //"make_classifier" is the older version (just one step) of preparing the database for classification - withName: make_classifier { - clusterOptions = { "-A $params.project -C fat -p node -N 1 -t 7-00:00:00 ${params.clusterOptions ?: ''}" } - } - - //"classifier_extract_seq" and "classifier_train" is the newer version, where database preparation is split in two to optimise resources withName: classifier_extract_seq { clusterOptions = { "-A $params.project -p core -n 1 -t 7-00:00:00 ${params.clusterOptions ?: ''}" } } From c2c726551811ffdcb69f0d6906e0d1f65f7ccda6 Mon Sep 17 00:00:00 2001 From: arontommi Date: Thu, 26 Nov 2020 13:47:47 +0100 Subject: [PATCH 16/46] fix linting errors --- docs/uppmax.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/uppmax.md b/docs/uppmax.md index f4e66cb..43c6282 100644 --- a/docs/uppmax.md +++ b/docs/uppmax.md @@ -74,14 +74,12 @@ It is not suitable for use with real data. To use it, submit with `-profile uppmax,devel`. 
- ## Running on Bianca For security reasons, there is no internet access on Bianca so you can't download from or upload files to the cluster directly. Before running a nf-core pipeline on Bianca you will first have to download the pipeline and singularity images needed elsewhere and transfer them via the wharf area to your Bianca project. You can follow the guide for downloading pipelines [for offline use](https://nf-co.re/tools#downloading-pipelines-for-offline-use). Note that you will have to download the singularity images as well. - Next transfer the pipeline and the singularity images to your project. Before running the pipeline you will have to indicate to nextflow where the singularity images are located by setting `NXF_SINGULARITY_CACHEDIR` : `export NXF_SINGULARITY_CACHEDIR=Your_Location_For_The_Singularity_directory/.` From ac108379cd02ee9ca9c611f516e23a38b3f68b44 Mon Sep 17 00:00:00 2001 From: arontommi Date: Thu, 26 Nov 2020 13:58:16 +0100 Subject: [PATCH 17/46] trailing spaces --- docs/uppmax.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/uppmax.md b/docs/uppmax.md index 43c6282..d9ac5ce 100644 --- a/docs/uppmax.md +++ b/docs/uppmax.md @@ -76,9 +76,9 @@ To use it, submit with `-profile uppmax,devel`. ## Running on Bianca -For security reasons, there is no internet access on Bianca so you can't download from or upload files to the cluster directly. Before running a nf-core pipeline on Bianca you will first have to download the pipeline and singularity images needed elsewhere and transfer them via the wharf area to your Bianca project. +For security reasons, there is no internet access on Bianca so you can't download from or upload files to the cluster directly. Before running a nf-core pipeline on Bianca you will first have to download the pipeline and singularity images needed elsewhere and transfer them via the wharf area to your Bianca project. 
-You can follow the guide for downloading pipelines [for offline use](https://nf-co.re/tools#downloading-pipelines-for-offline-use). Note that you will have to download the singularity images as well. +You can follow the guide for downloading pipelines [for offline use](https://nf-co.re/tools#downloading-pipelines-for-offline-use). Note that you will have to download the singularity images as well. Next transfer the pipeline and the singularity images to your project. Before running the pipeline you will have to indicate to nextflow where the singularity images are located by setting `NXF_SINGULARITY_CACHEDIR` : From 5590a9743ad2e5d185d9e074e98bcc2098d2a64d Mon Sep 17 00:00:00 2001 From: marcel-keller <61977721+marcel-keller@users.noreply.github.com> Date: Thu, 26 Nov 2020 15:05:25 +0200 Subject: [PATCH 18/46] Update ebc.config queueSize to 64 --- conf/ebc.config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conf/ebc.config b/conf/ebc.config index 4df0596..f159487 100644 --- a/conf/ebc.config +++ b/conf/ebc.config @@ -16,7 +16,7 @@ beforeScript = 'module load nextflow' } executor { - queueSize = 16 + queueSize = 64 } params { max_memory = 12.GB From c9ceaff3b3fecb72240fd54dbd14447a72b85135 Mon Sep 17 00:00:00 2001 From: marcel-keller <61977721+marcel-keller@users.noreply.github.com> Date: Wed, 9 Dec 2020 15:11:21 +0200 Subject: [PATCH 19/46] change of directory for conda environments change of directory due to recent changes in our infrastructure --- conf/ebc.config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conf/ebc.config b/conf/ebc.config index f159487..8f007ed 100644 --- a/conf/ebc.config +++ b/conf/ebc.config @@ -8,7 +8,7 @@ cleanup = true conda { - cacheDir = '/ebc_data/nf-core/conda' + cacheDir = '/gpfs/space/GI/ebc_data/software/nf-core/conda' } process { executor = 'slurm' From a61992d144c35aeed9adf08b69239ec97f0034fc Mon Sep 17 00:00:00 2001 From: Combiz Khozoie Date: Fri, 18 Dec 2020 10:21:26 +0000 Subject: 
[PATCH 20/46] Added institutional configs for Imperial and Imperial MEDBIO. Added institutional pipeline configs for scflow for Imperial and Imperial MEDBIO --- conf/imperial.config | 39 +++++++++++++++++++++++ conf/imperial_mb.config | 47 ++++++++++++++++++++++++++++ conf/pipeline/scflow/imperial.config | 18 +++++++++++ docs/imperial.md | 16 ++++++++++ docs/imperial_mb.md | 16 ++++++++++ docs/pipeline/scflow/imperial.md | 21 +++++++++++++ nfcore_custom.config | 4 +++ pipeline/scflow.config | 14 +++++++++ 8 files changed, 175 insertions(+) create mode 100644 conf/imperial.config create mode 100644 conf/imperial_mb.config create mode 100644 conf/pipeline/scflow/imperial.config create mode 100644 docs/imperial.md create mode 100644 docs/imperial_mb.md create mode 100644 docs/pipeline/scflow/imperial.md create mode 100644 pipeline/scflow.config diff --git a/conf/imperial.config b/conf/imperial.config new file mode 100644 index 0000000..6308a69 --- /dev/null +++ b/conf/imperial.config @@ -0,0 +1,39 @@ +//Profile config names for nf-core/configs + +params { + // Config Params + config_profile_description = 'Imperial College London - HPC Profile -- provided by nf-core/configs.' 
+ config_profile_contact = 'Combiz Khozoie (c.khozoie@imperial.ac.uk)' + config_profile_url = 'https://www.imperial.ac.uk/admin-services/ict/self-service/research-support/rcs/' + + // Resources + max_memory = 256.GB + max_cpus = 32 + max_time = 72.h +} + +workDir = "/rds/general/user/$USER/ephemeral/tmp" + +executor { + $pbspro { + queueSize = 50 + } + + $local { + cpus = 2 + queueSize = 1 + memory = '32 GB' + } +} + +singularity { + enabled = true + autoMounts = true + runOptions = "-B /rds/,/rdsgpfs/,/rds/general/user/$USER/ephemeral/tmp/:/tmp,/rds/general/user/$USER/ephemeral/tmp/:/var/tmp" +} + +process { + + executor = 'pbspro' + +} \ No newline at end of file diff --git a/conf/imperial_mb.config b/conf/imperial_mb.config new file mode 100644 index 0000000..11b337a --- /dev/null +++ b/conf/imperial_mb.config @@ -0,0 +1,47 @@ +//Profile config names for nf-core/configs + +params { + // Config Params + config_profile_description = 'Imperial College London - MEDBIO QUEUE - HPC Profile -- provided by nf-core/configs.' + config_profile_contact = 'Combiz Khozoie (c.khozoie@imperial.ac.uk)' + config_profile_url = 'https://www.imperial.ac.uk/bioinformatics-data-science-group/resources/uk-med-bio/' + + // Resources + max_memory = 640.GB + max_cpus = 32 + max_time = 168.h +} + +workDir = "/rds/general/user/$USER/ephemeral/tmp" + +executor { + $pbspro { + queueSize = 50 + } + + $local { + cpus = 2 + queueSize = 1 + memory = '32 GB' + } +} + +singularity { + enabled = true + autoMounts = true + runOptions = "-B /rds/,/rdsgpfs/,/rds/general/user/$USER/ephemeral/tmp/:/tmp,/rds/general/user/$USER/ephemeral/tmp/:/var/tmp" +} + +process { + + executor = 'pbspro' + queue = 'pqmedbio-tput' + + //queue = 'med-bio' //!! 
this is an alias and shouldn't be used + + withLabel:process_large { + queue = 'pqmedbio-large' + } + + +} \ No newline at end of file diff --git a/conf/pipeline/scflow/imperial.config b/conf/pipeline/scflow/imperial.config new file mode 100644 index 0000000..7f46466 --- /dev/null +++ b/conf/pipeline/scflow/imperial.config @@ -0,0 +1,18 @@ +// scflow/imperial specific profile config + +params { + // Config Params + config_profile_description = 'Imperial College London - HPC - nf-core/scFlow Profile -- provided by nf-core/configs.' + config_profile_contact = 'Combiz Khozoie (c.khozoie@imperial.ac.uk)' + + // Analysis Resource Params + ctd_folder = "/rds/general/user/$USER/projects/ukdrmultiomicsproject/live/Analyses/scFlowResources/refs/ctd" + ensembl_mappings = "/rds/general/user/$USER/projects/ukdrmultiomicsproject/live/Analyses/scFlowResources/src/ensembl-ids/ensembl_mappings.tsv" +} + +singularity { + enabled = true + autoMounts = true + cacheDir = "/rds/general/user/$USER/projects/ukdrmultiomicsproject/live/.singularity-cache" + runOptions = "-B /rds/,/rdsgpfs/,/rds/general/user/$USER/ephemeral/tmp/:/tmp,/rds/general/user/$USER/ephemeral/tmp/:/var/tmp" +} \ No newline at end of file diff --git a/docs/imperial.md b/docs/imperial.md new file mode 100644 index 0000000..43fc117 --- /dev/null +++ b/docs/imperial.md @@ -0,0 +1,16 @@ +# nf-core/configs: Imperial CX1 HPC Configuration + +All nf-core pipelines have been successfully configured for use on the CX1 cluster at Imperial College London HPC. + +To use, run the pipeline with `-profile imperial`. This will download and launch the [`imperial.config`](../conf/imperial.config) which has been pre-configured with a setup suitable for the CX1 cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline. 
+ +Before running the pipeline you will need to load Nextflow using the environment module system on the CX1 cluster. You can do this by issuing the commands below: + +```bash +## Load Nextflow and Singularity environment modules +module load Nextflow +``` + +>NB: You will need an account to use the HPC cluster CX1 in order to run the pipeline. If in doubt contact IT. +>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT. +>NB: To submit jobs to the Imperial College MEDBIO cluster, use `-profile imperial_mb` instead. diff --git a/docs/imperial_mb.md b/docs/imperial_mb.md new file mode 100644 index 0000000..d7f7f15 --- /dev/null +++ b/docs/imperial_mb.md @@ -0,0 +1,16 @@ +# nf-core/configs: Imperial MEDBIO HPC Configuration + +All nf-core pipelines have been successfully configured for use on the MEDBIO cluster at Imperial College London HPC. + +To use, run the pipeline with `-profile imperial_mb`. This will download and launch the [`imperial_mb.config`](../conf/imperial_mb.config) which has been pre-configured with a setup suitable for the MEDBIO cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline. + +Before running the pipeline you will need to load Nextflow using the environment module system on the head node. You can do this by issuing the commands below: + +```bash +## Load Nextflow and Singularity environment modules +module load Nextflow +``` + +>NB: You will need an account to use the HPC cluster MEDBIO in order to run the pipeline. Access to the MEDBIO queue is exclusive. If in doubt contact IT. +>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT. 
+>NB: To submit jobs to the standard CX1 cluster at Imperial College, use `-profile imperial` instead. diff --git a/docs/pipeline/scflow/imperial.md b/docs/pipeline/scflow/imperial.md new file mode 100644 index 0000000..be2cc59 --- /dev/null +++ b/docs/pipeline/scflow/imperial.md @@ -0,0 +1,21 @@ +# nf-core/configs: Imperial scflow Specific Configuration + +Extra specific configuration for the scflow pipeline + +## Usage + +To use, run the pipeline with `-profile imperial` or `-profile imperial_mb`. + +This will download and launch the scflow specific [`imperial.config`](../../../conf/pipeline/scflow/imperial.config) which has been pre-configured with a setup suitable for the Imperial HPC cluster. + +Example: `nextflow run nf-core/scflow -profile imperial` + +## scflow specific configurations for Imperial + +Specific configurations for Imperial have been made for scflow. + +* Singularity `enabled` and `autoMounts` set to `true` +* Singularity `cacheDir` path set to an RDS location +* Singularity `runOptions` path set to bind (`-B`) RDS paths with container paths. +* Params `ctd_folder` set to an RDS location. +* Parms `ensembl_mappings` set to an RDS location. 
diff --git a/nfcore_custom.config b/nfcore_custom.config index 2b3c419..e31c38e 100644 --- a/nfcore_custom.config +++ b/nfcore_custom.config @@ -24,6 +24,8 @@ profiles { czbiohub_aws { includeConfig "${params.custom_config_base}/conf/czbiohub_aws.config" } ebc { includeConfig "${params.custom_config_base}/conf/ebc.config" } icr_davros { includeConfig "${params.custom_config_base}/conf/icr_davros.config" } + imperial { includeConfig "${params.custom_config_base}/conf/imperial.config" } + imperial_mb { includeConfig "${params.custom_config_base}/conf/imperial_mb.config" } genotoul { includeConfig "${params.custom_config_base}/conf/genotoul.config" } google { includeConfig "${params.custom_config_base}/conf/google.config" } denbi_qbic { includeConfig "${params.custom_config_base}/conf/denbi_qbic.config" } @@ -54,6 +56,8 @@ params { cfc: ['.hpc.uni-tuebingen.de'], crick: ['.thecrick.org'], icr_davros: ['.davros.compute.estate'], + imperial: ['.hpc.ic.ac.uk'], + imperial_mb: ['.hpc.ic.ac.uk'], genotoul: ['.genologin1.toulouse.inra.fr', '.genologin2.toulouse.inra.fr'], genouest: ['.genouest.org'], uppmax: ['.uppmax.uu.se'], diff --git a/pipeline/scflow.config b/pipeline/scflow.config new file mode 100644 index 0000000..8c4697f --- /dev/null +++ b/pipeline/scflow.config @@ -0,0 +1,14 @@ +/* + * ------------------------------------------------- + * nfcore/scflow custom profile Nextflow config file + * ------------------------------------------------- + * Config options for custom environments. + * Cluster-specific config options should be saved + * in the conf/pipeline/scflow folder and imported + * under a profile name here. 
+ */ + +profiles { + imperial { includeConfig "${params.custom_config_base}/conf/pipeline/scflow/imperial.config" } + imperial_mb { includeConfig "${params.custom_config_base}/conf/pipeline/scflow/imperial.config" } // intended +} \ No newline at end of file From 4bb52358f5698dd23b9a4854c95547df25c7a3a2 Mon Sep 17 00:00:00 2001 From: Combiz Khozoie Date: Fri, 18 Dec 2020 10:27:40 +0000 Subject: [PATCH 21/46] updated Github Actions YML --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index dfb0782..e78bddd 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -16,7 +16,7 @@ jobs: needs: test_all_profiles strategy: matrix: - profile: ['abims', 'awsbatch', 'bi','bigpurple', 'binac', 'cbe', 'ccga_dx', 'ccga_med', 'cfc', 'cfc_dev', 'crick', 'denbi_qbic', 'ebc', 'genotoul', 'genouest', 'gis', 'google', 'hebbe', 'icr_davros', 'kraken', 'mpcdf', 'munin', 'pasteur', 'phoenix', 'prince', 'shh', 'uct_hpc', 'uppmax', 'utd_ganymede', 'uzh'] + profile: ['abims', 'awsbatch', 'bi','bigpurple', 'binac', 'cbe', 'ccga_dx', 'ccga_med', 'cfc', 'cfc_dev', 'crick', 'denbi_qbic', 'ebc', 'genotoul', 'genouest', 'gis', 'google', 'hebbe', 'icr_davros', 'imperial', 'imperial_mb', 'kraken', 'mpcdf', 'munin', 'pasteur', 'phoenix', 'prince', 'shh', 'uct_hpc', 'uppmax', 'utd_ganymede', 'uzh'] steps: - uses: actions/checkout@v1 - name: Install Nextflow From 3824403b4e4b9a675919b46ea07a1e485a040940 Mon Sep 17 00:00:00 2001 From: Combiz Khozoie Date: Fri, 18 Dec 2020 10:31:36 +0000 Subject: [PATCH 22/46] removed workDir from Imperial configs --- conf/imperial.config | 2 -- conf/imperial_mb.config | 2 -- 2 files changed, 4 deletions(-) diff --git a/conf/imperial.config b/conf/imperial.config index 6308a69..0ca2498 100644 --- a/conf/imperial.config +++ b/conf/imperial.config @@ -12,8 +12,6 @@ params { max_time = 72.h } -workDir = "/rds/general/user/$USER/ephemeral/tmp" - 
executor { $pbspro { queueSize = 50 diff --git a/conf/imperial_mb.config b/conf/imperial_mb.config index 11b337a..eabeb83 100644 --- a/conf/imperial_mb.config +++ b/conf/imperial_mb.config @@ -12,8 +12,6 @@ params { max_time = 168.h } -workDir = "/rds/general/user/$USER/ephemeral/tmp" - executor { $pbspro { queueSize = 50 From 17cf4e3b35799592e48cfc79ccf32fd5c69abfdf Mon Sep 17 00:00:00 2001 From: Combiz Khozoie Date: Fri, 18 Dec 2020 15:34:56 +0000 Subject: [PATCH 23/46] fixed indents --- conf/imperial.config | 18 +++++++++--------- conf/imperial_mb.config | 19 +++++++++---------- 2 files changed, 18 insertions(+), 19 deletions(-) diff --git a/conf/imperial.config b/conf/imperial.config index 0ca2498..f40d92b 100644 --- a/conf/imperial.config +++ b/conf/imperial.config @@ -13,15 +13,15 @@ params { } executor { - $pbspro { - queueSize = 50 - } + $pbspro { + queueSize = 50 + } - $local { - cpus = 2 - queueSize = 1 - memory = '32 GB' - } + $local { + cpus = 2 + queueSize = 1 + memory = '32 GB' + } } singularity { @@ -34,4 +34,4 @@ process { executor = 'pbspro' -} \ No newline at end of file +} diff --git a/conf/imperial_mb.config b/conf/imperial_mb.config index eabeb83..a89ffcd 100644 --- a/conf/imperial_mb.config +++ b/conf/imperial_mb.config @@ -13,15 +13,15 @@ params { } executor { - $pbspro { - queueSize = 50 - } + $pbspro { + queueSize = 50 + } - $local { - cpus = 2 - queueSize = 1 - memory = '32 GB' - } + $local { + cpus = 2 + queueSize = 1 + memory = '32 GB' + } } singularity { @@ -41,5 +41,4 @@ process { queue = 'pqmedbio-large' } - -} \ No newline at end of file +} From a4c55b3ea93a552cf12978ae0033b28bbe385c16 Mon Sep 17 00:00:00 2001 From: "James A. 
Fellows Yates" Date: Wed, 6 Jan 2021 10:44:12 +0100 Subject: [PATCH 24/46] Fix time limit 0-9 typo --- conf/pipeline/eager/shh.config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conf/pipeline/eager/shh.config b/conf/pipeline/eager/shh.config index b358712..8accfa7 100644 --- a/conf/pipeline/eager/shh.config +++ b/conf/pipeline/eager/shh.config @@ -59,7 +59,7 @@ process { withLabel:'mc_huge'{ cpus = { check_max( 32, 'cpus' ) } memory = { check_max( 256.GB * task.attempt, 'memory' ) } - time = { task.attempt == 3 ? 1449.h : task.attempt == 2 ? 48.h : 2.h } + time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h } } } From 9b5ac5c89d1761e877956c55e4a0a74af512a6a6 Mon Sep 17 00:00:00 2001 From: rbpisupati Date: Fri, 15 Jan 2021 12:55:46 +0100 Subject: [PATCH 25/46] added anaconda module for cbe --- conf/cbe.config | 1 + 1 file changed, 1 insertion(+) diff --git a/conf/cbe.config b/conf/cbe.config index 0e9b43e..0a5763f 100755 --- a/conf/cbe.config +++ b/conf/cbe.config @@ -9,6 +9,7 @@ process { executor = 'slurm' queue = { task.memory <= 170.GB ? 'c' : 'm' } clusterOptions = { task.time <= 8.h ? '--qos short': task.time <= 48.h ? '--qos medium' : '--qos long' } + module = 'anaconda3/2019.10' } singularity { From c11c26f15ffa59d576acaa43a931a84b06120903 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=85shild=20J=2E=20V=C3=A5gene?= <60298098+ashildv@users.noreply.github.com> Date: Wed, 20 Jan 2021 23:45:26 +0100 Subject: [PATCH 26/46] Create ceh.config --- conf/ceh.config | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 conf/ceh.config diff --git a/conf/ceh.config b/conf/ceh.config new file mode 100644 index 0000000..1c54682 --- /dev/null +++ b/conf/ceh.config @@ -0,0 +1,31 @@ +//Profile config names for nf-core/configs + params { + config_profile_description = 'Centre for Evolutionary Hologenomics - CEH profile provided by nf-core/configs.' 
+ config_profile_contact = 'Aashild Vaagene (@ashildv)' + } + +profiles { +ceh { + singularity { + enabled = true + autoMounts = true + cacheDir = '/shared/volume/hologenomics/data/cache/nf-eager/singularity' + } + process { + executor = 'slurm' + queue = { task.time < 24.h ? 'hologenomics-short' : task.time < 168.h ? 'hologenomics' : 'hologenomics-long' } + } + + cleanup = true + + executor { + queueSize = 8 + } + params { + max_memory = 250.GB + max_cpus = 35 + max_time = 720.h + } + } + } + From 8a8f95e364e8d0585053c927c1d596a9ca11862b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=85shild=20J=2E=20V=C3=A5gene?= <60298098+ashildv@users.noreply.github.com> Date: Wed, 20 Jan 2021 23:47:58 +0100 Subject: [PATCH 27/46] Update nfcore_custom.config --- nfcore_custom.config | 1 + 1 file changed, 1 insertion(+) diff --git a/nfcore_custom.config b/nfcore_custom.config index e31c38e..2519ee5 100644 --- a/nfcore_custom.config +++ b/nfcore_custom.config @@ -18,6 +18,7 @@ profiles { cbe { includeConfig "${params.custom_config_base}/conf/cbe.config" } ccga_dx { includeConfig "${params.custom_config_base}/conf/ccga_dx.config" } ccga_med { includeConfig "${params.custom_config_base}/conf/ccga_med.config" } + ceh { includeConfig "${params.custom_config_base}/conf/ceh.config"} cfc { includeConfig "${params.custom_config_base}/conf/cfc.config" } cfc_dev { includeConfig "${params.custom_config_base}/conf/cfc_dev.config" } crick { includeConfig "${params.custom_config_base}/conf/crick.config" } From 65831b73ef4c7e1257350810777f98ef46000d82 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=85shild=20J=2E=20V=C3=A5gene?= <60298098+ashildv@users.noreply.github.com> Date: Thu, 21 Jan 2021 00:17:08 +0100 Subject: [PATCH 28/46] Create ceh.md --- docs/ceh.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 docs/ceh.md diff --git a/docs/ceh.md b/docs/ceh.md new file mode 100644 index 0000000..b9c8b4f --- /dev/null +++ b/docs/ceh.md @@ -0,0 +1,21 @@ +# nf-core/configs: 
Centre for Evolutionary Hologenomics / EvoGenomics (hologenomics partition on HPC) Configuration + +The profile is configured to run with Singularity version 3.6.3-1.el7 which is part of the OS installtion and does not need to be loaded as a module. + +Before running the pipeline you will need to load Java, Miniconda and Nextflow. You can do this by including the commands below in your SLURM/sbatch script: + +```bash +## Load Java, Miniconda and Nextflow environment modules +module purge +module load lib +module load java/v1.8.0_202-jdk miniconda nextflow/v20.07.1.5412 +``` + +All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large, and all of the main output files will be saved in the `results/` directory anyway. +The config contains a `cleanup` command that removes the `work/` directory automatically once the pipeline has completeed successfully. If the run does not complete successfully then the `work/` dir should be removed manually to save storage space. + +This configuration will automatically choose the correct SLURM queue (short,medium,long) depending on the time and memory required by each process. + +>NB: You will need an account to use the HPC cluster to run the pipeline. If in doubt contact IT. + +>NB: Nextflow will need to submit the jobs via SLURM to the HPC cluster and as such the commands above will have to be submitted from one of the login nodes. 
From a6f316b58b6c83cee74a6f62572c2a9ce590805a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=85shild=20J=2E=20V=C3=A5gene?= <60298098+ashildv@users.noreply.github.com> Date: Thu, 21 Jan 2021 00:21:13 +0100 Subject: [PATCH 29/46] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index a967049..42d2b91 100644 --- a/README.md +++ b/README.md @@ -102,6 +102,7 @@ Currently documentation is available for the following systems: * [CBE](docs/cbe.md) * [CCGA_DX](docs/ccga_dx.md) * [CCGA_MED](docs/ccga_med.md) +* [CEH] (docs/ceh.md) * [CFC](docs/cfc.md) * [CRICK](docs/crick.md) * [CZBIOHUB_AWS](docs/czbiohub.md) From 22e6a60db10edf5b19c0036a5d6ba2fa1e14ecf9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=85shild=20J=2E=20V=C3=A5gene?= <60298098+ashildv@users.noreply.github.com> Date: Thu, 21 Jan 2021 00:22:13 +0100 Subject: [PATCH 30/46] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 42d2b91..1c1bcc4 100644 --- a/README.md +++ b/README.md @@ -102,7 +102,7 @@ Currently documentation is available for the following systems: * [CBE](docs/cbe.md) * [CCGA_DX](docs/ccga_dx.md) * [CCGA_MED](docs/ccga_med.md) -* [CEH] (docs/ceh.md) +* [CEH](docs/ceh.md) * [CFC](docs/cfc.md) * [CRICK](docs/crick.md) * [CZBIOHUB_AWS](docs/czbiohub.md) From 91594dffeaf5f9eceeb22ccf1e011ab2767627a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=85shild=20J=2E=20V=C3=A5gene?= <60298098+ashildv@users.noreply.github.com> Date: Thu, 21 Jan 2021 00:29:43 +0100 Subject: [PATCH 31/46] Update ceh.config --- conf/ceh.config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conf/ceh.config b/conf/ceh.config index 1c54682..ff81cfb 100644 --- a/conf/ceh.config +++ b/conf/ceh.config @@ -1,6 +1,6 @@ //Profile config names for nf-core/configs params { - config_profile_description = 'Centre for Evolutionary Hologenomics - CEH profile provided by nf-core/configs.' 
+ config_profile_description = 'Center for Evolutionary Hologenomics / Section for Evolutionary Genomics @ GLOBE - CEH profile provided by nf-core/configs.' config_profile_contact = 'Aashild Vaagene (@ashildv)' } From d108666950703a521518535c38eddad32de372f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=85shild=20J=2E=20V=C3=A5gene?= <60298098+ashildv@users.noreply.github.com> Date: Thu, 21 Jan 2021 00:30:54 +0100 Subject: [PATCH 32/46] Update ceh.md --- docs/ceh.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ceh.md b/docs/ceh.md index b9c8b4f..fff2921 100644 --- a/docs/ceh.md +++ b/docs/ceh.md @@ -1,4 +1,4 @@ -# nf-core/configs: Centre for Evolutionary Hologenomics / EvoGenomics (hologenomics partition on HPC) Configuration +# nf-core/configs: Center for Evolutionary Hologenomics & Section for Evolutionary Genomics (hologenomics partition on HPC) Configuration The profile is configured to run with Singularity version 3.6.3-1.el7 which is part of the OS installtion and does not need to be loaded as a module. From f8973f153a59833ff727c1caac7ed7f5667e52db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=85shild=20J=2E=20V=C3=A5gene?= <60298098+ashildv@users.noreply.github.com> Date: Thu, 21 Jan 2021 00:31:49 +0100 Subject: [PATCH 33/46] Update ceh.config --- conf/ceh.config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conf/ceh.config b/conf/ceh.config index ff81cfb..ee15f32 100644 --- a/conf/ceh.config +++ b/conf/ceh.config @@ -1,6 +1,6 @@ //Profile config names for nf-core/configs params { - config_profile_description = 'Center for Evolutionary Hologenomics / Section for Evolutionary Genomics @ GLOBE - CEH profile provided by nf-core/configs.' + config_profile_description = 'Center for Evolutionary Hologenomics & Section for Evolutionary Genomics @ GLOBE - CEH profile provided by nf-core/configs.' 
config_profile_contact = 'Aashild Vaagene (@ashildv)' } From 964cae4ac79cab4a1e209f826d9897a37d8d0d48 Mon Sep 17 00:00:00 2001 From: "James A. Fellows Yates" Date: Thu, 21 Jan 2021 06:22:37 +0100 Subject: [PATCH 34/46] Add CEH to CI --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index e78bddd..f9ffee0 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -16,7 +16,7 @@ jobs: needs: test_all_profiles strategy: matrix: - profile: ['abims', 'awsbatch', 'bi','bigpurple', 'binac', 'cbe', 'ccga_dx', 'ccga_med', 'cfc', 'cfc_dev', 'crick', 'denbi_qbic', 'ebc', 'genotoul', 'genouest', 'gis', 'google', 'hebbe', 'icr_davros', 'imperial', 'imperial_mb', 'kraken', 'mpcdf', 'munin', 'pasteur', 'phoenix', 'prince', 'shh', 'uct_hpc', 'uppmax', 'utd_ganymede', 'uzh'] + profile: ['abims', 'awsbatch', 'bi','bigpurple', 'binac', 'cbe', 'ccga_dx', 'ccga_med', 'ceh', 'cfc', 'cfc_dev', 'crick', 'denbi_qbic', 'ebc', 'genotoul', 'genouest', 'gis', 'google', 'hebbe', 'icr_davros', 'imperial', 'imperial_mb', 'kraken', 'mpcdf', 'munin', 'pasteur', 'phoenix', 'prince', 'shh', 'uct_hpc', 'uppmax', 'utd_ganymede', 'uzh'] steps: - uses: actions/checkout@v1 - name: Install Nextflow From 2fd475c3398305e3377cc2e8af18cc5cb2c68180 Mon Sep 17 00:00:00 2001 From: "James A. Fellows Yates" Date: Thu, 21 Jan 2021 06:30:43 +0100 Subject: [PATCH 35/46] Fix some linting errors --- docs/ceh.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/ceh.md b/docs/ceh.md index fff2921..11fe720 100644 --- a/docs/ceh.md +++ b/docs/ceh.md @@ -1,6 +1,8 @@ # nf-core/configs: Center for Evolutionary Hologenomics & Section for Evolutionary Genomics (hologenomics partition on HPC) Configuration -The profile is configured to run with Singularity version 3.6.3-1.el7 which is part of the OS installtion and does not need to be loaded as a module. 
+> **NB:** You will need an account to use the HPC cluster to run the pipeline. If in doubt contact IT. + +The profile is configured to run with Singularity version 3.6.3-1.el7 which is part of the OS installtion and does not need to be loaded as a module. Before running the pipeline you will need to load Java, Miniconda and Nextflow. You can do this by including the commands below in your SLURM/sbatch script: @@ -16,6 +18,4 @@ The config contains a `cleanup` command that removes the `work/` directory autom This configuration will automatically choose the correct SLURM queue (short,medium,long) depending on the time and memory required by each process. ->NB: You will need an account to use the HPC cluster to run the pipeline. If in doubt contact IT. - ->NB: Nextflow will need to submit the jobs via SLURM to the HPC cluster and as such the commands above will have to be submitted from one of the login nodes. +> **NB:** Nextflow will need to submit the jobs via SLURM to the HPC cluster and as such the commands above will have to be submitted from one of the login nodes. From 8fa720b9cc4afe6c38eb8bb44e00ad0a0e5074ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=85shild=20J=2E=20V=C3=A5gene?= <60298098+ashildv@users.noreply.github.com> Date: Thu, 21 Jan 2021 11:15:06 +0100 Subject: [PATCH 36/46] Update conf/ceh.config Co-authored-by: James A. Fellows Yates --- conf/ceh.config | 1 + 1 file changed, 1 insertion(+) diff --git a/conf/ceh.config b/conf/ceh.config index ee15f32..77389a7 100644 --- a/conf/ceh.config +++ b/conf/ceh.config @@ -2,6 +2,7 @@ params { config_profile_description = 'Center for Evolutionary Hologenomics & Section for Evolutionary Genomics @ GLOBE - CEH profile provided by nf-core/configs.' 
config_profile_contact = 'Aashild Vaagene (@ashildv)' + config_profile_url = 'https://ceh.ku.dk/ } profiles { From ef80452577b18aa40ed2e1e63eab62e0abe3c3c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=85shild=20J=2E=20V=C3=A5gene?= <60298098+ashildv@users.noreply.github.com> Date: Thu, 21 Jan 2021 11:16:46 +0100 Subject: [PATCH 37/46] Update docs/ceh.md Co-authored-by: James A. Fellows Yates --- docs/ceh.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ceh.md b/docs/ceh.md index 11fe720..1381613 100644 --- a/docs/ceh.md +++ b/docs/ceh.md @@ -4,7 +4,7 @@ The profile is configured to run with Singularity version 3.6.3-1.el7 which is part of the OS installtion and does not need to be loaded as a module. -Before running the pipeline you will need to load Java, Miniconda and Nextflow. You can do this by including the commands below in your SLURM/sbatch script: +Before running the pipeline you will need to load Java, and Nextflow. You can do this by including the commands below in your SLURM/sbatch script: ```bash ## Load Java, Miniconda and Nextflow environment modules From 594985f1eeac1bbf0c9cd535fc6e60ce9faa466d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=85shild=20J=2E=20V=C3=A5gene?= <60298098+ashildv@users.noreply.github.com> Date: Thu, 21 Jan 2021 11:43:34 +0100 Subject: [PATCH 38/46] Update and rename ceh.config to seg_globe.config --- conf/{ceh.config => seg_globe.config} | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename conf/{ceh.config => seg_globe.config} (75%) diff --git a/conf/ceh.config b/conf/seg_globe.config similarity index 75% rename from conf/ceh.config rename to conf/seg_globe.config index 77389a7..390d0ef 100644 --- a/conf/ceh.config +++ b/conf/seg_globe.config @@ -1,8 +1,8 @@ //Profile config names for nf-core/configs params { - config_profile_description = 'Center for Evolutionary Hologenomics & Section for Evolutionary Genomics @ GLOBE - CEH profile provided by nf-core/configs.' 
+ config_profile_description = 'Section for Evolutionary Genomics @ GLOBE, University of Copenhagen - seg_globe profile provided by nf-core/configs.' config_profile_contact = 'Aashild Vaagene (@ashildv)' - config_profile_url = 'https://ceh.ku.dk/ + config_profile_url = 'https://globe.ku.dk/research/evogenomics/' } profiles { From bcd137225e293a4cdc93a822f81c53f67e9ea3dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=85shild=20J=2E=20V=C3=A5gene?= <60298098+ashildv@users.noreply.github.com> Date: Thu, 21 Jan 2021 11:46:46 +0100 Subject: [PATCH 39/46] Update and rename ceh.md to seg_globe.md --- docs/{ceh.md => seg_globe.md} | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) rename docs/{ceh.md => seg_globe.md} (74%) diff --git a/docs/ceh.md b/docs/seg_globe.md similarity index 74% rename from docs/ceh.md rename to docs/seg_globe.md index 1381613..1fa832f 100644 --- a/docs/ceh.md +++ b/docs/seg_globe.md @@ -1,16 +1,16 @@ -# nf-core/configs: Center for Evolutionary Hologenomics & Section for Evolutionary Genomics (hologenomics partition on HPC) Configuration +# nf-core/configs: Section for Evolutionary Genomics at GLOBE, University of Copenhagen (hologenomics partition on HPC) Configuration > **NB:** You will need an account to use the HPC cluster to run the pipeline. If in doubt contact IT. The profile is configured to run with Singularity version 3.6.3-1.el7 which is part of the OS installtion and does not need to be loaded as a module. -Before running the pipeline you will need to load Java, and Nextflow. You can do this by including the commands below in your SLURM/sbatch script: +Before running the pipeline you will need to load Java and Nextflow. 
You can do this by including the commands below in your SLURM/sbatch script: ```bash -## Load Java, Miniconda and Nextflow environment modules +## Load Java and Nextflow environment modules module purge module load lib -module load java/v1.8.0_202-jdk miniconda nextflow/v20.07.1.5412 +module load java/v1.8.0_202-jdk nextflow/v20.07.1.5412 ``` All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large, and all of the main output files will be saved in the `results/` directory anyway. From 8037b73ddd6a5a1fae3803c65915c4c27f1180ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=85shild=20J=2E=20V=C3=A5gene?= <60298098+ashildv@users.noreply.github.com> Date: Thu, 21 Jan 2021 11:52:58 +0100 Subject: [PATCH 40/46] Update nfcore_custom.config --- nfcore_custom.config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nfcore_custom.config b/nfcore_custom.config index 2519ee5..7310de5 100644 --- a/nfcore_custom.config +++ b/nfcore_custom.config @@ -18,7 +18,6 @@ profiles { cbe { includeConfig "${params.custom_config_base}/conf/cbe.config" } ccga_dx { includeConfig "${params.custom_config_base}/conf/ccga_dx.config" } ccga_med { includeConfig "${params.custom_config_base}/conf/ccga_med.config" } - ceh { includeConfig "${params.custom_config_base}/conf/ceh.config"} cfc { includeConfig "${params.custom_config_base}/conf/cfc.config" } cfc_dev { includeConfig "${params.custom_config_base}/conf/cfc_dev.config" } crick { includeConfig "${params.custom_config_base}/conf/crick.config" } @@ -39,6 +38,7 @@ profiles { pasteur { includeConfig "${params.custom_config_base}/conf/pasteur.config" } phoenix { includeConfig "${params.custom_config_base}/conf/phoenix.config" } prince { includeConfig "${params.custom_config_base}/conf/prince.config" } + seg_globe { includeConfig 
"${params.custom_config_base}/conf/seg_globe.config"} shh { includeConfig "${params.custom_config_base}/conf/shh.config" } uct_hpc { includeConfig "${params.custom_config_base}/conf/uct_hpc.config" } uppmax { includeConfig "${params.custom_config_base}/conf/uppmax.config" } From 765a2e0b7ed6460df9f980cfee350f5b7a7884f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=85shild=20J=2E=20V=C3=A5gene?= <60298098+ashildv@users.noreply.github.com> Date: Thu, 21 Jan 2021 11:55:00 +0100 Subject: [PATCH 41/46] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1c1bcc4..a590599 100644 --- a/README.md +++ b/README.md @@ -102,7 +102,6 @@ Currently documentation is available for the following systems: * [CBE](docs/cbe.md) * [CCGA_DX](docs/ccga_dx.md) * [CCGA_MED](docs/ccga_med.md) -* [CEH](docs/ceh.md) * [CFC](docs/cfc.md) * [CRICK](docs/crick.md) * [CZBIOHUB_AWS](docs/czbiohub.md) @@ -120,6 +119,7 @@ Currently documentation is available for the following systems: * [PASTEUR](docs/pasteur.md) * [PHOENIX](docs/phoenix.md) * [PRINCE](docs/prince.md) +* [SEG_GLOBE](docs/seg_globe.md) * [SHH](docs/shh.md) * [UCT_HPC](docs/uct_hpc.md) * [UPPMAX](docs/uppmax.md) From 7cffcf809bdd0070d317085923499bacc96c9068 Mon Sep 17 00:00:00 2001 From: "James A. 
Fellows Yates" Date: Thu, 21 Jan 2021 12:50:14 +0100 Subject: [PATCH 42/46] Update main.yml --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index f9ffee0..ea2117d 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -16,7 +16,7 @@ jobs: needs: test_all_profiles strategy: matrix: - profile: ['abims', 'awsbatch', 'bi','bigpurple', 'binac', 'cbe', 'ccga_dx', 'ccga_med', 'ceh', 'cfc', 'cfc_dev', 'crick', 'denbi_qbic', 'ebc', 'genotoul', 'genouest', 'gis', 'google', 'hebbe', 'icr_davros', 'imperial', 'imperial_mb', 'kraken', 'mpcdf', 'munin', 'pasteur', 'phoenix', 'prince', 'shh', 'uct_hpc', 'uppmax', 'utd_ganymede', 'uzh'] + profile: ['abims', 'awsbatch', 'bi','bigpurple', 'binac', 'cbe', 'ccga_dx', 'ccga_med', 'cfc', 'cfc_dev', 'crick', 'denbi_qbic', 'ebc', 'genotoul', 'genouest', 'gis', 'google', 'hebbe', 'icr_davros', 'imperial', 'imperial_mb', 'kraken', 'mpcdf', 'munin', 'pasteur', 'phoenix', 'prince', 'seg_globe', 'shh', 'uct_hpc', 'uppmax', 'utd_ganymede', 'uzh'] steps: - uses: actions/checkout@v1 - name: Install Nextflow From c4604450d864392741e37867f622298bf2fc4626 Mon Sep 17 00:00:00 2001 From: "James A. Fellows Yates" Date: Thu, 21 Jan 2021 13:04:12 +0100 Subject: [PATCH 43/46] Update seg_globe.md --- docs/seg_globe.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/seg_globe.md b/docs/seg_globe.md index 1fa832f..da03737 100644 --- a/docs/seg_globe.md +++ b/docs/seg_globe.md @@ -4,13 +4,13 @@ The profile is configured to run with Singularity version 3.6.3-1.el7 which is part of the OS installtion and does not need to be loaded as a module. -Before running the pipeline you will need to load Java and Nextflow. You can do this by including the commands below in your SLURM/sbatch script: +Before running the pipeline you will need to load Java, miniconda and Nextflow. 
You can do this by including the commands below in your SLURM/sbatch script: ```bash ## Load Java and Nextflow environment modules module purge module load lib -module load java/v1.8.0_202-jdk nextflow/v20.07.1.5412 +module load java/v1.8.0_202-jdk miniconda nextflow/v20.07.1.5412 ``` All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large, and all of the main output files will be saved in the `results/` directory anyway. From 26d164ca74ccc25ba7ee1e6e8fb9843ee7563f65 Mon Sep 17 00:00:00 2001 From: "James A. Fellows Yates" Date: Thu, 21 Jan 2021 13:15:08 +0100 Subject: [PATCH 44/46] Remove ceh profile for now --- conf/seg_globe.config | 53 ++++++++++++++++++++----------------------- 1 file changed, 24 insertions(+), 29 deletions(-) diff --git a/conf/seg_globe.config b/conf/seg_globe.config index 390d0ef..41a3d6e 100644 --- a/conf/seg_globe.config +++ b/conf/seg_globe.config @@ -1,32 +1,27 @@ //Profile config names for nf-core/configs - params { - config_profile_description = 'Section for Evolutionary Genomics @ GLOBE, University of Copenhagen - seg_globe profile provided by nf-core/configs.' - config_profile_contact = 'Aashild Vaagene (@ashildv)' - config_profile_url = 'https://globe.ku.dk/research/evogenomics/' - } +params { + config_profile_description = 'Section for Evolutionary Genomics @ GLOBE, University of Copenhagen - seg_globe profile provided by nf-core/configs.' + config_profile_contact = 'Aashild Vaagene (@ashildv)' + config_profile_url = 'https://globe.ku.dk/research/evogenomics/' + max_memory = 250.GB + max_cpus = 35 + max_time = 720.h +} -profiles { -ceh { - singularity { - enabled = true - autoMounts = true - cacheDir = '/shared/volume/hologenomics/data/cache/nf-eager/singularity' - } - process { - executor = 'slurm' - queue = { task.time < 24.h ? 'hologenomics-short' : task.time < 168.h ? 
'hologenomics' : 'hologenomics-long' } - } +singularity { + enabled = true + autoMounts = true + cacheDir = '/shared/volume/hologenomics/data/cache/nf-eager/singularity' +} + +process { + executor = 'slurm' + queue = { task.time < 24.h ? 'hologenomics-short' : task.time < 168.h ? 'hologenomics' : 'hologenomics-long' } +} - cleanup = true - - executor { - queueSize = 8 - } - params { - max_memory = 250.GB - max_cpus = 35 - max_time = 720.h - } - } - } - +cleanup = true + +executor { + queueSize = 8 +} + From 713a032a36d22b96e1a2cae96a3c78fa6b40f3fb Mon Sep 17 00:00:00 2001 From: "Thiseas C. Lamnidis" Date: Tue, 26 Jan 2021 15:08:21 +0100 Subject: [PATCH 45/46] Add debug profile which deactivates cleanup of workdir after successful run --- conf/shh.config | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/conf/shh.config b/conf/shh.config index dc51e87..ab3dcca 100644 --- a/conf/shh.config +++ b/conf/shh.config @@ -10,6 +10,7 @@ params { igenomes_base = "/projects1/public_data/igenomes/" } +// Perform work directory cleanup after a successful run cleanup = true singularity { @@ -46,4 +47,8 @@ profiles { queue = { task.memory > 756.GB || task.cpus > 64 ? 'supercruncher': task.time <= 2.h ? 'short' : task.time <= 48.h ? 'medium': 'long' } } } + // Profile to deactivate automatic cleanup of work directory after a successful run. Overwrites cleanup option. 
+ debug { + cleanup = false + } } From 1608b36dcf0624302b065f60bb75b9b2f23ce487 Mon Sep 17 00:00:00 2001 From: Charles Plessy Date: Tue, 2 Feb 2021 17:40:10 +0900 Subject: [PATCH 46/46] Institutional profile for Okinawa Institute of Science and Technology --- .github/workflows/main.yml | 2 +- README.md | 1 + conf/oist.config | 22 ++++++++++++++++++++++ docs/oist.md | 33 +++++++++++++++++++++++++++++++++ nfcore_custom.config | 1 + 5 files changed, 58 insertions(+), 1 deletion(-) create mode 100644 conf/oist.config create mode 100644 docs/oist.md diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index ea2117d..3e5c930 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -16,7 +16,7 @@ jobs: needs: test_all_profiles strategy: matrix: - profile: ['abims', 'awsbatch', 'bi','bigpurple', 'binac', 'cbe', 'ccga_dx', 'ccga_med', 'cfc', 'cfc_dev', 'crick', 'denbi_qbic', 'ebc', 'genotoul', 'genouest', 'gis', 'google', 'hebbe', 'icr_davros', 'imperial', 'imperial_mb', 'kraken', 'mpcdf', 'munin', 'pasteur', 'phoenix', 'prince', 'seg_globe', 'shh', 'uct_hpc', 'uppmax', 'utd_ganymede', 'uzh'] + profile: ['abims', 'awsbatch', 'bi','bigpurple', 'binac', 'cbe', 'ccga_dx', 'ccga_med', 'cfc', 'cfc_dev', 'crick', 'denbi_qbic', 'ebc', 'genotoul', 'genouest', 'gis', 'google', 'hebbe', 'icr_davros', 'imperial', 'imperial_mb', 'kraken', 'mpcdf', 'munin', 'oist', 'pasteur', 'phoenix', 'prince', 'seg_globe', 'shh', 'uct_hpc', 'uppmax', 'utd_ganymede', 'uzh'] steps: - uses: actions/checkout@v1 - name: Install Nextflow diff --git a/README.md b/README.md index a590599..45721e8 100644 --- a/README.md +++ b/README.md @@ -116,6 +116,7 @@ Currently documentation is available for the following systems: * [KRAKEN](docs/kraken.md) * [MPCDF](docs/mpcdf.md) * [MUNIN](docs/munin.md) +* [OIST](docs/oist.md) * [PASTEUR](docs/pasteur.md) * [PHOENIX](docs/phoenix.md) * [PRINCE](docs/prince.md) diff --git a/conf/oist.config b/conf/oist.config new file mode 100644 index 
0000000..8815ed4 --- /dev/null +++ b/conf/oist.config @@ -0,0 +1,22 @@ +//Profile config names for nf-core/configs +params { + config_profile_description = 'The Okinawa Institute of Science and Technology Graduate University (OIST) HPC cluster profile provided by nf-core/configs.' + config_profile_contact = 'OISTs Bioinformatics User Group ' + config_profile_url = 'https://github.com/nf-core/configs/blob/master/docs/oist.md' +} + +singularity { + enabled = true +} + +process { + executor = 'slurm' + queue = 'compute' + clusterOptions = '-C zen2' +} + +params { + max_memory = 500.GB + max_cpus = 128 + max_time = 90.h +} diff --git a/docs/oist.md b/docs/oist.md new file mode 100644 index 0000000..8c4a68c --- /dev/null +++ b/docs/oist.md @@ -0,0 +1,33 @@ +# nf-core/configs: OIST Configuration + +The nf-core pipelines [rnaseq](https://nf-co.re/rnaseq) and +[eager](https://nf-co.re/eager) have been successfully tested on the _Deigo_ +cluster at the Okinawa Institute of Science and Technology Graduate University +([OIST](https://www.oist.jp)). We have no reason to expect that other +pipelines would not work. + +To use, run the pipeline with `-profile oist`. This will download and launch +the [`oist.config`](../conf/oist.config) which has been pre-configured with a +setup suitable for _Deigo_. Using this profile, a docker image containing all +of the required software will be downloaded, and converted to a Singularity +image before execution of the pipeline. + +## Below are non-mandatory information e.g. on modules to load etc + +Before running the pipeline you will need to load Nextflow and Singularity +using the environment module system on _Deigo_. You can do this by issuing the +commands below: + +```bash +## Load the latest Nextflow and Singularity environment modules +ml purge +ml bioinfo-ugrp-modules +ml Other/Nextflow +``` + +>NB: You will need an account to use the _Deigo_ cluster in order to run the +>pipeline. If in doubt contact IT. 
+> +>NB: Nextflow will submit the jobs via the SLURM scheduler to the HPC cluster +>and as such the commands above will have to be executed on one of the login +>nodes. If in doubt contact IT. diff --git a/nfcore_custom.config b/nfcore_custom.config index 7310de5..429e3c1 100644 --- a/nfcore_custom.config +++ b/nfcore_custom.config @@ -35,6 +35,7 @@ profiles { kraken { includeConfig "${params.custom_config_base}/conf/kraken.config" } mpcdf { includeConfig "${params.custom_config_base}/conf/mpcdf.config" } munin { includeConfig "${params.custom_config_base}/conf/munin.config" } + oist { includeConfig "${params.custom_config_base}/conf/oist.config" } pasteur { includeConfig "${params.custom_config_base}/conf/pasteur.config" } phoenix { includeConfig "${params.custom_config_base}/conf/phoenix.config" } prince { includeConfig "${params.custom_config_base}/conf/prince.config" }