From 6c5a5c8bcb5b548c00cb25a5ee9016853a809603 Mon Sep 17 00:00:00 2001
From: Marc Hoeppner
Date: Mon, 24 Feb 2020 12:01:43 +0100
Subject: [PATCH 1/5] Removing old CCGA profile from docs and linting

Removing CCGA profile from master config
---
 .github/workflows/main.yml |  2 +-
 README.md                  |  1 -
 conf/ccga.config           | 41 --------------------------------------
 docs/ccga.md               | 18 ------------------
 nfcore_custom.config       |  1 -
 5 files changed, 1 insertion(+), 62 deletions(-)
 delete mode 100644 conf/ccga.config
 delete mode 100644 docs/ccga.md

diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 5fb2601..6f96a98 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -16,7 +16,7 @@ jobs:
     needs: test_all_profiles
     strategy:
       matrix:
-        profile: ['awsbatch', 'bigpurple', 'binac', 'cbe', 'ccga_dx', 'ccga_med', 'ccga', 'cfc', 'crick', 'denbi_qbic', 'genotoul', 'genouest', 'gis', 'hebbe', 'kraken', 'munin', 'pasteur', 'phoenix', 'prince', 'shh', 'uct_hex', 'uppmax', 'uzh']
+        profile: ['awsbatch', 'bigpurple', 'binac', 'cbe', 'ccga_dx', 'ccga_med', 'cfc', 'crick', 'denbi_qbic', 'genotoul', 'genouest', 'gis', 'hebbe', 'kraken', 'munin', 'pasteur', 'phoenix', 'prince', 'shh', 'uct_hex', 'uppmax', 'uzh']
     steps:
       - uses: actions/checkout@v1
       - name: Install Nextflow

diff --git a/README.md b/README.md
index 3011a00..7634409 100644
--- a/README.md
+++ b/README.md
@@ -98,7 +98,6 @@ Currently documentation is available for the following systems:
 * [BIGPURPLE](docs/bigpurple.md)
 * [BINAC](docs/binac.md)
 * [CBE](docs/cbe.md)
-* [CCGA](docs/ccga.md)
 * [CCGA_DX](docs/ccga_dx.md)
 * [CCGA_MED](docs/ccga_med.md)
 * [CFC](docs/binac.md)

diff --git a/conf/ccga.config b/conf/ccga.config
deleted file mode 100644
index 6163626..0000000
--- a/conf/ccga.config
+++ /dev/null
@@ -1,41 +0,0 @@
-//Profile config names for nf-core/configs
-params {
-  config_profile_description = 'CCGA cluster profile provided by nf-core/configs.'
-  config_profile_contact = 'Marc Hoeppner (@marchoeppner)'
-  config_profile_url = 'https://www.ccga.uni-kiel.de/'
-}
-
-/*
- * -------------------------------------------------
- * Nextflow config file for CCGA cluster in Kiel
- * -------------------------------------------------
- */
-
-singularity {
-  enabled = true
-  runOptions = "-B /ifs -B /scratch -B /work_beegfs"
-  cacheDir = "/ifs/data/nfs_share/ikmb_repository/singularity_cache/"
-}
-
-executor {
-  queueSize = 100
-}
-
-process {
-
-  // Global process config
-  executor = 'slurm'
-  queue = 'ikmb_a'
-
-  clusterOptions = { "--qos=ikmb_a" }
-
-}
-
-params {
-  // illumina iGenomes reference file paths on RZCluster
-  igenomes_base = '/ifs/data/nfs_share/ikmb_repository/references/iGenomes/references/'
-  saveReference = true
-  max_memory = 128.GB
-  max_cpus = 16
-  max_time = 120.h
-}

diff --git a/docs/ccga.md b/docs/ccga.md
deleted file mode 100644
index 798df29..0000000
--- a/docs/ccga.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# nf-core/configs: CCGA Configuration
-
-Deployment and testing of nf-core pipelines at the CCGA cluster is on-going.
-
-To use, run the pipeline with `-profile ccga`. This will download and launch the [`ccga.config`](../conf/ccga.config) which has been pre-configured with a setup suitable for the CCGA cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
-
-Before running the pipeline you will need to load Nextflow and Singularity using the environment module system on the cluster. You can do this by issuing the commands below:
-
-```bash
-## Load Nextflow and Singularity environment modules
-module purge
-module load IKMB
-module load Java/1.8.0
-module load Nextflow
-module load singularity3.1.0
-```
-
->NB: Access to the CCGA cluster is restricted to IKMB/CCGA employes. Please talk to Marc Hoeppner to get access (@marchoeppner).

diff --git a/nfcore_custom.config b/nfcore_custom.config
index 8b4a3a9..0fcbe66 100644
--- a/nfcore_custom.config
+++ b/nfcore_custom.config
@@ -17,7 +17,6 @@ profiles {
   bigpurple { includeConfig "${params.custom_config_base}/conf/bigpurple.config" }
   binac { includeConfig "${params.custom_config_base}/conf/binac.config" }
   cbe { includeConfig "${params.custom_config_base}/conf/cbe.config" }
-  ccga { includeConfig "${params.custom_config_base}/conf/ccga.config" }
   ccga_dx { includeConfig "${params.custom_config_base}/conf/ccga_dx.config" }
   ccga_med { includeConfig "${params.custom_config_base}/conf/ccga_med.config" }
   cfc { includeConfig "${params.custom_config_base}/conf/cfc.config" }
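For orientation (a reading aid, not part of the patch series): every profile in `nfcore_custom.config` is a one-line entry that pulls in the matching file under `conf/`, which is why retiring a cluster touches exactly the files listed above: the config, its docs page, the CI matrix, the README index, and the profile entry. An abridged sketch of that pattern, taken from the diff context:

```nextflow
// Abridged sketch of the profiles block that PATCH 1 edits.
// params.custom_config_base points at the root of the nf-core/configs repo.
profiles {
  cbe     { includeConfig "${params.custom_config_base}/conf/cbe.config" }
  // the ccga entry removed by this patch used to sit here
  ccga_dx { includeConfig "${params.custom_config_base}/conf/ccga_dx.config" }
  cfc     { includeConfig "${params.custom_config_base}/conf/cfc.config" }
}
```

Running a pipeline with `-profile cfc` activates the matching entry and loads that cluster's config.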
From 8e42bbd7d055acae8fff3ed8fca372979a96adfa Mon Sep 17 00:00:00 2001
From: mseybold
Date: Fri, 6 Mar 2020 10:25:08 +0100
Subject: [PATCH 2/5] Update cfc.config

We just added some new high-memory nodes to the CFC cluster. The
partition/queue name is qbic; each node has 128 cores and 2 TB of RAM.
The old queue (named compute) is the Slurm default. Every process
requesting more than 60 GB of RAM or more than 20 CPUs should run on
those new nodes, so this adds a queue directive and adjusts the
max_memory and max_cpus settings.
---
 conf/cfc.config | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/conf/cfc.config b/conf/cfc.config
index 763dcf0..99917ba 100644
--- a/conf/cfc.config
+++ b/conf/cfc.config
@@ -13,6 +13,7 @@ singularity {
 process {
     beforeScript = 'module load devel/singularity/3.4.2'
     executor = 'slurm'
+    queue = { task.memory > 60.GB || task.cpus > 20 ? 'qbic' }
 }
 
 weblog{
@@ -22,7 +23,7 @@
 
 params {
     igenomes_base = '/nfsmounts/igenomes'
-    max_memory = 498.GB
-    max_cpus = 20
+    max_memory = 1999.GB
+    max_cpus = 128
     max_time = 140.h
 }

From 52842de88b0d64e2bdbfdf61a833b975879c2546 Mon Sep 17 00:00:00 2001
From: mseybold
Date: Fri, 6 Mar 2020 10:46:37 +0100
Subject: [PATCH 3/5] Update cfc.config

Test failed, so added the default queue
---
 conf/cfc.config | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/conf/cfc.config b/conf/cfc.config
index 99917ba..d1a109a 100644
--- a/conf/cfc.config
+++ b/conf/cfc.config
@@ -13,7 +13,7 @@ singularity {
 process {
     beforeScript = 'module load devel/singularity/3.4.2'
     executor = 'slurm'
-    queue = { task.memory > 60.GB || task.cpus > 20 ? 'qbic' }
+    queue = { task.memory > 60.GB || task.cpus > 20 ? 'qbic' : 'compute' }
 }
 
 weblog{
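A note on how the fixed directive behaves (a sketch, not part of the patches): `queue` accepts a Groovy closure, and Nextflow evaluates it once per task against that task's requested resources, so routing happens job by job. PATCH 2 failed in testing because its ternary had no else branch; PATCH 3 supplies the `'compute'` fallback. The process below is hypothetical and only illustrates which partition a given resource request would hit under the fixed config:

```nextflow
// Hypothetical process, used only to illustrate the routing logic.
process highmem_step {
    memory 200.GB   // > 60 GB, so the closure returns 'qbic'
    cpus 16         // on its own this would stay under the 20-CPU threshold

    script:
    """
    echo "running on partition: \${SLURM_JOB_PARTITION:-unknown}"
    """
}
```

With the SLURM executor, the queue returned by the closure should become the job's `-p`/partition setting, so this task would be submitted to qbic, while a 4-CPU/8-GB task would stay on the default compute partition.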
From 2135996b13220dc76b4fe1b1196384307796c317 Mon Sep 17 00:00:00 2001
From: mseybold
Date: Fri, 6 Mar 2020 11:12:21 +0100
Subject: [PATCH 4/5] Update cfc.md

---
 docs/cfc.md | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/docs/cfc.md b/docs/cfc.md
index a7f6beb..acf93dd 100644
--- a/docs/cfc.md
+++ b/docs/cfc.md
@@ -10,8 +10,11 @@ Before running the pipeline you will need to load Nextflow and Singularity using
 ## Load Nextflow and Singularity environment modules
 module purge
 module load devel/java_jdk/1.8.0u121
-module load qbic/singularity_slurm/3.0.3
+module load devel/singularity/3.4.2
 ```
 
 >NB: You will need an account to use the HPC cluster CFC in order to run the pipeline. If in doubt contact IT.
 >NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
+New high-memory nodes are available on CFC: the partition/queue name is qbic, and each node has 128 cores and 2 TB of RAM.
+The old queue (named compute) is the Slurm default.
+Every process requesting more than 60 GB of RAM or more than 20 CPUs should run on these new nodes.

From 48400b8863b11dc2e70917fa3d586e56e5e41f6f Mon Sep 17 00:00:00 2001
From: mseybold
Date: Fri, 6 Mar 2020 11:14:27 +0100
Subject: [PATCH 5/5] Update README.md

Fixed link to cfc.md
---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 0e58b09..3b360a1 100644
--- a/README.md
+++ b/README.md
@@ -100,7 +100,7 @@ Currently documentation is available for the following systems:
 * [CCGA](docs/ccga.md)
 * [CCGA_DX](docs/ccga_dx.md)
 * [CCGA_MED](docs/ccga_med.md)
-* [CFC](docs/binac.md)
+* [CFC](docs/cfc.md)
 * [CRICK](docs/crick.md)
 * [CZBIOHUB_AWS](docs/czbiohub.md)
 * [CZBIOHUB_AWS_HIGHPRIORITY](docs/czbiohub.md)
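Taken together, PATCH 2 and PATCH 3 leave `conf/cfc.config` looking roughly like this (reconstructed from the hunks above as a reading aid; scopes such as `singularity` and `weblog` are omitted because only their opening lines appear in the diff context):

```nextflow
// conf/cfc.config after PATCH 2 and PATCH 3 (reconstructed, abridged)
process {
    beforeScript = 'module load devel/singularity/3.4.2'
    executor = 'slurm'
    // Route big jobs to the 2 TB / 128-core HighMem nodes, everything
    // else to the Slurm default partition.
    queue = { task.memory > 60.GB || task.cpus > 20 ? 'qbic' : 'compute' }
}

params {
    igenomes_base = '/nfsmounts/igenomes'
    max_memory = 1999.GB  // just under the 2 TB per HighMem node
    max_cpus = 128        // cores per HighMem node
    max_time = 140.h
}
```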