diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 56755f1..e4d68ca 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -67,6 +67,7 @@ jobs:
           - "ifb_core"
           - "imperial"
           - "jax"
+          - "ku_sund_dangpu"
           - "lugh"
           - "marvin"
           - "medair"
diff --git a/README.md b/README.md
index 5264a3c..838d288 100644
--- a/README.md
+++ b/README.md
@@ -122,6 +122,7 @@ Currently documentation is available for the following systems:
 - [ICR_DAVROS](docs/icr_davros.md)
 - [IMPERIAL](docs/imperial.md)
 - [JAX](docs/jax.md)
+- [KU SUND DANGPU](docs/ku_sund_dangpu.md)
 - [LUGH](docs/lugh.md)
 - [MAESTRO](docs/maestro.md)
 - [MARVIN](docs/marvin.md)
diff --git a/conf/biohpc_gen.config b/conf/biohpc_gen.config
index e3f4069..694a25f 100755
--- a/conf/biohpc_gen.config
+++ b/conf/biohpc_gen.config
@@ -11,9 +11,8 @@ env {
 
 process {
     executor = 'slurm'
-    queue = { task.memory <= 1536.GB ? (task.time > 2.d || task.memory > 384.GB ? 'biohpc_gen_production' : 'biohpc_gen_normal') : 'biohpc_gen_highmem' }
-    beforeScript = 'module use /dss/dsslegfs02/pn73se/pn73se-dss-0000/spack/modules/x86_avx2/linux*'
-    module = 'charliecloud/0.22:miniconda3'
+    queue = { task.memory <= 1536.GB ? (task.time > 2.d || task.memory > 384.GB ? 'biohpc_gen_production' : 'biohpc_gen_normal') : 'biohpc_gen_highmem' }
+    module = 'charliecloud/0.25'
 }
 
 charliecloud {
@@ -21,7 +20,7 @@ charliecloud {
 }
 
 params {
-    params.max_time = 14.d
-    params.max_cpus = 80
+    params.max_time = 14.d
+    params.max_cpus = 80
     params.max_memory = 3.TB
 }
diff --git a/conf/cfc.config b/conf/cfc.config
index 1948e14..999e8d6 100644
--- a/conf/cfc.config
+++ b/conf/cfc.config
@@ -12,7 +12,7 @@ singularity {
 
 process {
     executor = 'slurm'
-    queue = { task.memory > 60.GB || task.cpus > 20 ? 'qbic' : 'compute' }
+    queue = 'qbic'
     scratch = 'true'
 }
 
diff --git a/conf/cfc_dev.config b/conf/cfc_dev.config
index 1d61baf..87caf66 100644
--- a/conf/cfc_dev.config
+++ b/conf/cfc_dev.config
@@ -11,7 +11,7 @@ singularity {
 
 process {
     executor = 'slurm'
-    queue = { task.memory > 60.GB || task.cpus > 20 ? 'qbic' : 'compute' }
+    queue = 'qbic'
     scratch = 'true'
 }
 
@@ -25,4 +25,4 @@ params {
     max_memory = 1999.GB
     max_cpus = 128
     max_time = 140.h
-}
\ No newline at end of file
+}
diff --git a/conf/cheaha.config b/conf/cheaha.config
index 58963b5..ec79f10 100644
--- a/conf/cheaha.config
+++ b/conf/cheaha.config
@@ -9,7 +9,7 @@ params {
 }
 
 env {
-    TMPDIR="$USER"
+    TMPDIR="$scratch_dir"
     SINGULARITY_TMPDIR="$scratch_dir"
 }
 
diff --git a/conf/crukmi.config b/conf/crukmi.config
index 4823585..778935b 100644
--- a/conf/crukmi.config
+++ b/conf/crukmi.config
@@ -5,11 +5,8 @@ params {
     config_profile_url = 'http://scicom.picr.man.ac.uk/projects/user-support/wiki'
 }
 
-env {
-    SINGULARITY_CACHEDIR = '/lmod/nextflow_software'
-}
-
 singularity {
+    cacheDir = '/lmod/nextflow_software'
     enabled = true
     autoMounts = true
 }
@@ -22,6 +19,11 @@ process {
     maxErrors = '-1'
     maxRetries = 3
 
+    withLabel:process_single {
+        cpus = { check_max( 1 * task.attempt, 'cpus' ) }
+        memory = { check_max( 5.GB * task.attempt, 'memory' ) }
+    }
+
     withLabel:process_low {
         cpus = { check_max( 1 * task.attempt, 'cpus' ) }
         memory = { check_max( 5.GB * task.attempt, 'memory' ) }
diff --git a/conf/ku_sund_dangpu.config b/conf/ku_sund_dangpu.config
new file mode 100644
index 0000000..51ca462
--- /dev/null
+++ b/conf/ku_sund_dangpu.config
@@ -0,0 +1,25 @@
+params {
+    config_profile_contact = 'Adrija Kalvisa '
+    config_profile_description = 'dangpufl01 configuration'
+    config_profile_url = ''
+
+    // General cpus/memory/time requirements
+    max_cpus = 30
+    max_memory = 200.GB
+    max_time = 72.h
+}
+
+process {
+    executor = 'slurm'
+
+}
+
+executor {
+    queueSize = 5
+}
+
+singularity {
+    enabled = true
+    autoMounts = true
+    runOptions = '--bind /projects:/projects'
+}
\ No newline at end of file
diff --git a/docs/biohpc_gen.md b/docs/biohpc_gen.md
index c007cad..660e789 100644
--- a/docs/biohpc_gen.md
+++ b/docs/biohpc_gen.md
@@ -4,14 +4,12 @@ All nf-core pipelines have been successfully configured for use on the BioHPC Ge
 
 To use, run the pipeline with `-profile biohpc_gen`. This will download and launch the [`biohpc_gen.config`](../conf/biohpc_gen.config) which has been pre-configured with a setup suitable for the biohpc_gen cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Charliecloud container before execution of the pipeline.
 
-Before running the pipeline you will need to load Nextflow and Charliecloud using the environment module system on biohpc_gen. You can do this by issuing the commands below:
+Before running the pipeline you will need to load Nextflow and Charliecloud using the environment module system on a login node. You can do this by issuing the commands below:
 
 ```bash
 ## Load Nextflow and Charliecloud environment modules
-module purge
-module load nextflow charliecloud/0.22
+module load nextflow/21.04.3 charliecloud/0.25
 ```
 
-> NB: Charliecloud support requires Nextflow version `21.03.0-edge` or later.
 > NB: You will need an account to use the LRZ Linux cluster as well as group access to the biohpc_gen cluster in order to run nf-core pipelines.
 > NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes.
diff --git a/docs/ku_sund_dangpu.md b/docs/ku_sund_dangpu.md
new file mode 100644
index 0000000..72baf46
--- /dev/null
+++ b/docs/ku_sund_dangpu.md
@@ -0,0 +1,35 @@
+# nf-core/configs: ku_sund_dangpu configuration
+
+All nf-core pipelines have been successfully configured for use on the DANGPU at the
+Novo Nordisk Foundation Center for Stem Cell Medicine (reNEW) and the Novo Nordisk Foundation Center for Protein Research (CPR) at the University of Copenhagen.
+
+To use, run the pipeline with `-profile ku_sund_dangpu`. This will download and launch the [`ku_sund_dangpu.config`](../conf/ku_sund_dangpu.config) which has been pre-configured with a setup suitable for the DANGPU.
+
+## Modules
+
+Before running the pipeline you will need to load Nextflow and Singularity using the environment module system on DANGPU. You can do this by issuing the commands below:
+
+```bash
+## Load Nextflow and Singularity environment modules
+module purge
+module load java/11.0.15 nextflow/22.04.4 singularity/3.8.0
+# alternative modules for an older Nextflow version (v21) that works with Java 8:
+# module load jdk/1.8.0_291 nextflow/21.04.1.5556 singularity/3.8.0
+export NXF_OPTS='-Xms1g -Xmx4g'
+export NXF_HOME=/projects/dan1/people/${USER}/cache/nxf-home
+export NXF_TEMP=/scratch/tmp
+export NXF_SINGULARITY_CACHEDIR=/projects/dan1/people/${USER}/cache/singularity-images
+```
+
+Create the user-specific Nextflow directories if they don't exist yet:
+
+```bash
+mkdir $NXF_SINGULARITY_CACHEDIR
+mkdir $NXF_HOME
+```
+
+Finally, download and test the pipeline of choice using `-profile ku_sund_dangpu`. Note that you would normally submit resource-intensive commands to Slurm yourself, but for nf-core pipelines you do not have to: the `ku_sund_dangpu` profile is pre-configured to use Slurm as the resource manager. Just make sure that the pipeline is run within a tmux session.
+
+```bash
+nextflow run nf-core/rnaseq -profile test,ku_sund_dangpu
+```
diff --git a/nfcore_custom.config b/nfcore_custom.config
index d93e266..aba5349 100644
--- a/nfcore_custom.config
+++ b/nfcore_custom.config
@@ -49,6 +49,7 @@ profiles {
     ifb_core { includeConfig "${params.custom_config_base}/conf/ifb_core.config" }
     imperial { includeConfig "${params.custom_config_base}/conf/imperial.config" }
     jax { includeConfig "${params.custom_config_base}/conf/jax.config" }
+    ku_sund_dangpu {includeConfig "${params.custom_config_base}/conf/ku_sund_dangpu.config"}
     lugh { includeConfig "${params.custom_config_base}/conf/lugh.config" }
     maestro { includeConfig "${params.custom_config_base}/conf/maestro.config" }
     marvin { includeConfig "${params.custom_config_base}/conf/marvin.config" }
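After applying this change, one way to sanity-check that the new profile resolves correctly is to print the merged configuration with `nextflow config` before launching a real run; this sketch is not part of the change itself, and the pipeline name is only an example:

```bash
# Hypothetical check: print the configuration nf-core/rnaseq would resolve with
# the new ku_sund_dangpu profile, without submitting any jobs to Slurm.
module load java/11.0.15 nextflow/22.04.4 singularity/3.8.0
nextflow config -profile ku_sund_dangpu nf-core/rnaseq
```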