
Merge branch 'master' into master

commit d8ad376f71
Author: Evan Floden
Date: 2020-03-06 10:29:50 +00:00
Committed by: GitHub (GPG key ID 4AEE18F83AFDEB23; no known key found for this signature in database)

6 changed files with 8 additions and 65 deletions

File: README.md
@@ -97,10 +97,9 @@ Currently documentation is available for the following systems:
 * [BIGPURPLE](docs/bigpurple.md)
 * [BINAC](docs/binac.md)
 * [CBE](docs/cbe.md)
-* [CCGA](docs/ccga.md)
 * [CCGA_DX](docs/ccga_dx.md)
 * [CCGA_MED](docs/ccga_med.md)
-* [CFC](docs/binac.md)
+* [CFC](docs/cfc.md)
 * [CRICK](docs/crick.md)
 * [CZBIOHUB_AWS](docs/czbiohub.md)
 * [CZBIOHUB_AWS_HIGHPRIORITY](docs/czbiohub.md)

File: conf/ccga.config (deleted)
@@ -1,41 +0,0 @@
-/*
- * -------------------------------------------------
- *  Nextflow config file for CCGA cluster in Kiel
- * -------------------------------------------------
- */
-
-//Profile config names for nf-core/configs
-
-params {
-  config_profile_description = 'CCGA cluster profile provided by nf-core/configs.'
-  config_profile_contact = 'Marc Hoeppner (@marchoeppner)'
-  config_profile_url = 'https://www.ccga.uni-kiel.de/'
-}
-
-singularity {
-  enabled = true
-  runOptions = "-B /ifs -B /scratch -B /work_beegfs"
-  cacheDir = "/ifs/data/nfs_share/ikmb_repository/singularity_cache/"
-}
-
-executor {
-  queueSize=100
-}
-
-process {
-  // Global process config
-  executor = 'slurm'
-  queue = 'ikmb_a'
-
-  clusterOptions = { "--qos=ikmb_a" }
-
-}
-
-params {
-  // illumina iGenomes reference file paths on RZCluster
-  igenomes_base = '/ifs/data/nfs_share/ikmb_repository/references/iGenomes/references/'
-  saveReference = true
-  max_memory = 128.GB
-  max_cpus = 16
-  max_time = 120.h
-}

File: conf/cfc.config
@@ -13,6 +13,7 @@ singularity {
 process {
   beforeScript = 'module load devel/singularity/3.4.2'
   executor = 'slurm'
+  queue = { task.memory > 60.GB || task.cpus > 20 ? 'qbic' : 'compute' }
 }
 
 weblog{
@@ -22,7 +23,7 @@ weblog{
 
 params {
   igenomes_base = '/nfsmounts/igenomes'
-  max_memory = 498.GB
-  max_cpus = 20
+  max_memory = 1999.GB
+  max_cpus = 128
   max_time = 140.h
 }
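The `queue` directive added above is a dynamic (closure-valued) Nextflow setting, evaluated per task, so the Slurm partition is chosen from each task's requested resources. A minimal standalone sketch of the same pattern (thresholds and partition names are taken from the diff; the rest is illustrative, not the full cfc.config):

```groovy
// Dynamic Slurm partition selection: tasks requesting more than 60 GB
// of memory or more than 20 CPUs go to the HighMem 'qbic' partition,
// everything else to the default 'compute' partition.
process {
    executor = 'slurm'
    queue    = { task.memory > 60.GB || task.cpus > 20 ? 'qbic' : 'compute' }
}
```

Because the closure is re-evaluated for every task attempt, a process whose memory request grows on retry should migrate to `qbic` automatically.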

File: docs/ccga.md (deleted)
@@ -1,18 +0,0 @@
-# nf-core/configs: CCGA Configuration
-
-Deployment and testing of nf-core pipelines at the CCGA cluster is on-going.
-
-To use, run the pipeline with `-profile ccga`. This will download and launch the [`ccga.config`](../conf/ccga.config) which has been pre-configured with a setup suitable for the CCGA cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
-
-Before running the pipeline you will need to load Nextflow and Singularity using the environment module system on the cluster. You can do this by issuing the commands below:
-
-```bash
-## Load Nextflow and Singularity environment modules
-module purge
-module load IKMB
-module load Java/1.8.0
-module load Nextflow
-module load singularity3.1.0
-```
-
->NB: Access to the CCGA cluster is restricted to IKMB/CCGA employes. Please talk to Marc Hoeppner to get access (@marchoeppner).

File: docs/cfc.md
@@ -10,8 +10,11 @@ Before running the pipeline you will need to load Nextflow and Singularity using
 ## Load Nextflow and Singularity environment modules
 module purge
 module load devel/java_jdk/1.8.0u121
-module load qbic/singularity_slurm/3.0.3
+module load devel/singularity/3.4.2
 ```
 
 >NB: You will need an account to use the HPC cluster CFC in order to run the pipeline. If in doubt contact IT.
 >NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
+New HighMem nodes are available on the CFC; the partition/queue name is `qbic`, and each node has 128 cores and 2 TB of RAM.
+The old queue (named `compute`) remains the Slurm default.
+Each process requesting more than 60 GB of RAM or more than 20 CPUs should run on these new nodes.
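To make the routing concrete, here is a hypothetical pipeline script (the process name, resources, and script body are invented for illustration); under the cfc profile its 100 GB request exceeds the 60 GB threshold, so the dynamic queue directive resolves to `qbic`:

```groovy
// hypothetical_highmem.nf -- illustrative example, not part of this commit.
nextflow.enable.dsl = 2

// Requests 100 GB / 24 CPUs, so the cfc profile's queue closure
// evaluates to 'qbic' and Slurm submits it to a HighMem node.
process HIGHMEM_TASK {
    memory '100 GB'
    cpus 24

    output:
    stdout

    script:
    """
    echo "submitted to the qbic partition"
    """
}

workflow {
    HIGHMEM_TASK() | view
}
```

Running something like `nextflow run hypothetical_highmem.nf -profile cfc` from a login node would submit the task through Slurm as described in the notes above.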

File: nfcore_custom.config
@@ -14,7 +14,6 @@ profiles {
   bigpurple { includeConfig "${params.custom_config_base}/conf/bigpurple.config" }
   binac { includeConfig "${params.custom_config_base}/conf/binac.config" }
   cbe { includeConfig "${params.custom_config_base}/conf/cbe.config" }
-  ccga { includeConfig "${params.custom_config_base}/conf/ccga.config" }
   ccga_dx { includeConfig "${params.custom_config_base}/conf/ccga_dx.config" }
   ccga_med { includeConfig "${params.custom_config_base}/conf/ccga_med.config" }
   cfc { includeConfig "${params.custom_config_base}/conf/cfc.config" }
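For context on the removal: each line in this file maps a `-profile` name to a remote config that Nextflow fetches from `params.custom_config_base` at run time. A stripped-down sketch of the mechanism (only the `cfc` entry shown; the hard-coded base URL is an assumption standing in for the repository's versioned default):

```groovy
// Profile-include mechanism as in nfcore_custom.config: selecting
// '-profile cfc' makes Nextflow download and apply conf/cfc.config
// from the nf-core/configs repository.
params {
    custom_config_base = 'https://raw.githubusercontent.com/nf-core/configs/master'
}

profiles {
    cfc { includeConfig "${params.custom_config_base}/conf/cfc.config" }
}
```

With the `ccga` line removed here, and its config and docs deleted above, `-profile ccga` no longer resolves to anything.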