From 494aa8b53e9830a6f2d332e0d3ee40447a6dda8d Mon Sep 17 00:00:00 2001
From: Alexander Peltzer
Date: Mon, 28 Oct 2019 13:15:54 +0100
Subject: [PATCH 1/4] Mini fix

---
 conf/binac.config | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/conf/binac.config b/conf/binac.config
index e2bce86..cba2ab6 100644
--- a/conf/binac.config
+++ b/conf/binac.config
@@ -13,7 +13,7 @@ process {
   beforeScript = 'module load devel/singularity/3.0.3'
   executor = 'pbs'
   queue = 'short'
-  process.queue = { task.memory > 128.GB ? 'smp': task.time <= 20m ? 'tiny' : task.time <= 48.h ? 'short' : task.time <= 168.h ? 'long'}
+  process.queue = { task.memory > 128.GB ? 'smp': task.time <= 20.m ? 'tiny' : task.time <= 48.h ? 'short' : task.time <= 168.h ? 'long'}
 }
 
 params {

From 41e607e7f98c67cb181ceffa625002473997e0c2 Mon Sep 17 00:00:00 2001
From: Alexander Peltzer
Date: Mon, 28 Oct 2019 13:17:39 +0100
Subject: [PATCH 2/4] Fix expression

---
 conf/binac.config | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/conf/binac.config b/conf/binac.config
index cba2ab6..748c9e6 100644
--- a/conf/binac.config
+++ b/conf/binac.config
@@ -13,7 +13,7 @@ process {
   beforeScript = 'module load devel/singularity/3.0.3'
   executor = 'pbs'
   queue = 'short'
-  process.queue = { task.memory > 128.GB ? 'smp': task.time <= 20.m ? 'tiny' : task.time <= 48.h ? 'short' : task.time <= 168.h ? 'long'}
+  process.queue = { task.memory > 128.GB ? 'smp': task.time <= 20.m ? 'tiny' : task.time <= 48.h ? 'short' : task.time <= 168.h ? 'short' : 'long'}
 }
 
 params {

From e557ced1844e6bb6ce74e8f2b038abe2b7024819 Mon Sep 17 00:00:00 2001
From: Alexander Peltzer
Date: Mon, 28 Oct 2019 13:21:41 +0100
Subject: [PATCH 3/4] Update max memory

---
 conf/binac.config | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/conf/binac.config b/conf/binac.config
index 748c9e6..f669793 100644
--- a/conf/binac.config
+++ b/conf/binac.config
@@ -18,7 +18,7 @@ process {
 
 params {
   igenomes_base = '/nfsmounts/igenomes'
-  max_memory = 1024.GB
+  max_memory = 1000.GB
   max_cpus = 28
   max_time = 168.h
 }
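The closure fixed by the first two patches picks a PBS queue for each BinAC task from its requested memory and wall time. Written out as an if/else chain, the committed expression is equivalent to the sketch below (illustrative only, not part of the patch; `task.memory` and `task.time` are the standard Nextflow task attributes, and the file itself keeps the one-line ternary assigned via `process.queue`):

```groovy
// Sketch of the queue-selection logic committed in PATCH 2/4, expanded for readability.
// conf/binac.config assigns this as `process.queue = { ... }` inside the process block;
// shown here as a plain dynamic queue directive.
process {
  queue = {
    if (task.memory > 128.GB) return 'smp'    // large-memory queue
    if (task.time <= 20.m)    return 'tiny'   // up to 20 minutes
    if (task.time <= 48.h)    return 'short'  // up to 48 hours
    if (task.time <= 168.h)   return 'short'  // up to 168 hours, still 'short' as committed
    return 'long'                             // anything longer than one week
  }
}
```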
From d2603a134ed9e1922ad18db8fe747140ae8d8391 Mon Sep 17 00:00:00 2001
From: Alexander Peltzer
Date: Mon, 11 Nov 2019 12:26:19 +0100
Subject: [PATCH 4/4] Add in denbi cloud configs

---
 conf/denbi_qbic.config | 26 ++++++++++++++++++++++++++
 docs/denbi_qbic.md     |  8 ++++++++
 2 files changed, 34 insertions(+)
 create mode 100644 conf/denbi_qbic.config
 create mode 100644 docs/denbi_qbic.md

diff --git a/conf/denbi_qbic.config b/conf/denbi_qbic.config
new file mode 100644
index 0000000..d5f1882
--- /dev/null
+++ b/conf/denbi_qbic.config
@@ -0,0 +1,26 @@
+//Profile config names for nf-core/configs
+params {
+  config_profile_description = 'de.NBI cluster profile provided by nf-core/configs.'
+  config_profile_contact = 'Alexander Peltzer (@apeltzer)'
+  config_profile_url = 'https://cloud.denbi.de/'
+}
+
+singularity {
+  enabled = true
+}
+
+process {
+  executor = 'pbs'
+  queue = 'batch'
+}
+
+params {
+  max_memory = 512.GB
+  max_cpus = 28
+  max_time = 960.h
+}
+
+weblog{
+  enabled = true
+  url = 'https://services.qbic.uni-tuebingen.de/flowstore/workflows'
+}

diff --git a/docs/denbi_qbic.md b/docs/denbi_qbic.md
new file mode 100644
index 0000000..220aac2
--- /dev/null
+++ b/docs/denbi_qbic.md
@@ -0,0 +1,8 @@
+# nf-core/configs: de.NBI QBIC Configuration
+
+All nf-core pipelines have been successfully configured for use on the de.NBI Cloud cluster. This is a virtual cluster that has been set up using the [virtual cluster setup scripts](https://github.com/MaximilianHanussek/virtual_cluster_local_ips).
+
+To use, run the pipeline with `-profile denbi_qbic`. This will download and launch the [`denbi_qbic.config`](../conf/denbi_qbic.config) which has been pre-configured with a setup suitable for the automatically created cluster. Using this profile, a Docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
+
+>NB: You will need an account to use de.NBI Cluster in order to run the pipeline. If in doubt contact IT.
+>NB: Nextflow will need to submit the jobs via the job scheduler to the cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
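In practice the profile added here is selected on the command line when launching a pipeline, for example `nextflow run nf-core/<pipeline> -profile denbi_qbic` (the pipeline name and any pipeline-specific parameters are placeholders), run from one of the login nodes as the note above describes.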