Mirror of https://github.com/MillironX/nf-configs.git (synced 2024-11-24 09:09:56 +00:00)

Merge branch 'master' into bgrande/sage-improve-downloads

Commit 735df9ef76
11 changed files with 79 additions and 17 deletions

.github/workflows/main.yml (vendored, 1 change)

@@ -67,6 +67,7 @@ jobs:
           - "ifb_core"
           - "imperial"
           - "jax"
+          - "ku_sund_dangpu"
           - "lugh"
           - "marvin"
           - "medair"

@@ -122,6 +122,7 @@ Currently documentation is available for the following systems:
 - [ICR_DAVROS](docs/icr_davros.md)
 - [IMPERIAL](docs/imperial.md)
 - [JAX](docs/jax.md)
+- [KU SUND DANGPU](docs/ku_sund_dangpu.md)
 - [LUGH](docs/lugh.md)
 - [MAESTRO](docs/maestro.md)
 - [MARVIN](docs/marvin.md)

@@ -11,9 +11,8 @@ env {

 process {
     executor = 'slurm'
     queue = { task.memory <= 1536.GB ? (task.time > 2.d || task.memory > 384.GB ? 'biohpc_gen_production' : 'biohpc_gen_normal') : 'biohpc_gen_highmem' }
-    beforeScript = 'module use /dss/dsslegfs02/pn73se/pn73se-dss-0000/spack/modules/x86_avx2/linux*'
-    module = 'charliecloud/0.22:miniconda3'
+    module = 'charliecloud/0.25'
 }

 charliecloud {
@@ -21,7 +20,7 @@ charliecloud {
 }

 params {
     params.max_time = 14.d
     params.max_cpus = 80
     params.max_memory = 3.TB
 }

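The retained `queue` closure routes each task to one of three SLURM partitions by its memory and time request. As an illustrative check only (the partition names come from the diff above; the chosen `sinfo` output columns are arbitrary), the partitions and their limits could be confirmed on the cluster:

```bash
# Illustrative: list the partitions referenced by the queue closure above
# (partition names taken from this diff; column selection is an arbitrary choice)
sinfo -p biohpc_gen_normal,biohpc_gen_production,biohpc_gen_highmem -o "%P %l %m"
```
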
@@ -12,7 +12,7 @@ singularity {

 process {
     executor = 'slurm'
-    queue = { task.memory > 60.GB || task.cpus > 20 ? 'qbic' : 'compute' }
+    queue = 'qbic'
     scratch = 'true'
 }

@@ -11,7 +11,7 @@ singularity {

 process {
     executor = 'slurm'
-    queue = { task.memory > 60.GB || task.cpus > 20 ? 'qbic' : 'compute' }
+    queue = 'qbic'
     scratch = 'true'
 }

@@ -25,4 +25,4 @@ params {
     max_memory = 1999.GB
     max_cpus = 128
     max_time = 140.h
 }

@@ -9,7 +9,7 @@ params {
 }

 env {
-    TMPDIR="$USER"
+    TMPDIR="$scratch_dir"
     SINGULARITY_TMPDIR="$scratch_dir"
 }

@@ -5,11 +5,8 @@ params {
     config_profile_url = 'http://scicom.picr.man.ac.uk/projects/user-support/wiki'
 }

-env {
-    SINGULARITY_CACHEDIR = '/lmod/nextflow_software'
-}
-
 singularity {
+    cacheDir = '/lmod/nextflow_software'
     enabled = true
     autoMounts = true
 }

@@ -22,6 +19,11 @@ process {
     maxErrors = '-1'
     maxRetries = 3

+    withLabel:process_single {
+        cpus = { check_max( 1 * task.attempt, 'cpus' ) }
+        memory = { check_max( 5.GB * task.attempt, 'memory' ) }
+    }
+
     withLabel:process_low {
         cpus = { check_max( 1 * task.attempt, 'cpus' ) }
         memory = { check_max( 5.GB * task.attempt, 'memory' ) }

conf/ku_sund_dangpu.config (new file, 25 lines)

@@ -0,0 +1,25 @@
+params {
+    config_profile_contact = 'Adrija Kalvisa <adrija.kalvisa@sund.ku.dk>'
+    config_profile_description = 'dangpufl01 configuration'
+    config_profile_url = ''
+
+    // General cpus/memory/time requirements
+    max_cpus = 30
+    max_memory = 200.GB
+    max_time = 72.h
+}
+
+process {
+    executor = 'slurm'
+
+}
+
+executor {
+    queueSize = 5
+}
+
+singularity {
+    enabled = true
+    autoMounts = true
+    runOptions = '--bind /projects:/projects'
+}

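As a sketch of how this new profile could be exercised once merged (the pipeline name is only an example; `nextflow config` prints the resolved configuration without submitting any jobs):

```bash
# Sketch: pull an example pipeline and inspect the configuration that
# -profile ku_sund_dangpu resolves to (nf-core/rnaseq is an arbitrary example)
nextflow pull nf-core/rnaseq
nextflow config nf-core/rnaseq -profile ku_sund_dangpu
```
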
@@ -4,14 +4,12 @@ All nf-core pipelines have been successfully configured for use on the BioHPC Gen

 To use, run the pipeline with `-profile biohpc_gen`. This will download and launch the [`biohpc_gen.config`](../conf/biohpc_gen.config) which has been pre-configured with a setup suitable for the biohpc_gen cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Charliecloud container before execution of the pipeline.

-Before running the pipeline you will need to load Nextflow and Charliecloud using the environment module system on biohpc_gen. You can do this by issuing the commands below:
+Before running the pipeline you will need to load Nextflow and Charliecloud using the environment module system on a login node. You can do this by issuing the commands below:

 ```bash
 ## Load Nextflow and Charliecloud environment modules
-module purge
-module load nextflow charliecloud/0.22
+module load nextflow/21.04.3 charliecloud/0.25
 ```

-> NB: Charliecloud support requires Nextflow version `21.03.0-edge` or later.
 > NB: You will need an account to use the LRZ Linux cluster as well as group access to the biohpc_gen cluster in order to run nf-core pipelines.
 > NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes.

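Following the updated instructions, a full launch from an LRZ login node might look like the sketch below; the module names are the ones added in this diff, while the pipeline and its test profile are placeholders:

```bash
# Sketch of a launch per the updated docs; nf-core/rnaseq is a placeholder pipeline
module load nextflow/21.04.3 charliecloud/0.25
nextflow run nf-core/rnaseq -profile test,biohpc_gen
```
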
docs/ku_sund_dangpu.md (new file, 35 lines)

@@ -0,0 +1,35 @@
+# nf-core/configs: ku_sund_dangpu configuration
+
+All nf-core pipelines have been successfully configured for use on the DANGPU at the
+Novo Nordisk Foundation Center for Stem Cell Medicine (reNEW) and the Novo Nordisk Foundation Center for Protein Research (CPR) at the University of Copenhagen.
+
+To use, run the pipeline with `-profile ku_sund_dangpu`. This will download and launch the [`ku_sund_dangpu.config`](../conf/ku_sund_dangpu.config) which has been pre-configured with a setup suitable for the DANGPU.
+
+## Modules
+
+Before running the pipeline you will need to load Nextflow and Singularity using the environment module system on DANGPU. You can do this by issuing the commands below:
+
+```bash
+## Load Nextflow and Singularity environment modules
+module purge
+module load java/11.0.15 nextflow/22.04.4 singularity/3.8.0
+# alternative modules for an older Nextflow version (v21) that works with Java 8:
+# module load jdk/1.8.0_291 nextflow/21.04.1.5556 singularity/3.8.0
+export NXF_OPTS='-Xms1g -Xmx4g'
+export NXF_HOME=/projects/dan1/people/${USER}/cache/nxf-home
+export NXF_TEMP=/scratch/tmp
+export NXF_SINGULARITY_CACHEDIR=/projects/dan1/people/${USER}/cache/singularity-images
+```
+
+Create the user-specific Nextflow directories if they don't exist yet:
+
+```
+mkdir $NXF_SINGULARITY_CACHEDIR
+mkdir $NXF_HOME
+```
+
+Finally, download and test the pipeline of choice using `-profile ku_sund_dangpu`. Note that normally you would run resource-intensive commands with slurm, but in the case of nf-core pipelines you do not have to do this: slurm is pre-configured as the resource manager within the `ku_sund_dangpu` profile. Just make sure that the pipeline is run within a tmux session.
+
+```
+nextflow run nf-core/rnaseq -profile test,ku_sund_dangpu
+```

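The new document asks users to keep runs inside a tmux session but does not show the commands; a minimal sketch (the session name is arbitrary):

```bash
# Minimal sketch: run the pipeline inside a detachable tmux session
tmux new -s nfcore          # start a named session
nextflow run nf-core/rnaseq -profile test,ku_sund_dangpu
# detach with Ctrl-b d; reattach later with: tmux attach -t nfcore
```
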
@@ -49,6 +49,7 @@ profiles {
     ifb_core { includeConfig "${params.custom_config_base}/conf/ifb_core.config" }
     imperial { includeConfig "${params.custom_config_base}/conf/imperial.config" }
     jax { includeConfig "${params.custom_config_base}/conf/jax.config" }
+    ku_sund_dangpu {includeConfig "${params.custom_config_base}/conf/ku_sund_dangpu.config"}
     lugh { includeConfig "${params.custom_config_base}/conf/lugh.config" }
     maestro { includeConfig "${params.custom_config_base}/conf/maestro.config" }
     marvin { includeConfig "${params.custom_config_base}/conf/marvin.config" }

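Before the profile is published, the new `includeConfig` line can be exercised against a local checkout by overriding the config source; this is a sketch, assuming the pipeline exposes the standard nf-core `--custom_config_base` parameter and using a placeholder path:

```bash
# Sketch: point an nf-core pipeline at a local clone of this repo to test the new profile
# (--custom_config_base is assumed to be available, as in standard nf-core pipelines)
nextflow run nf-core/rnaseq -profile test,ku_sund_dangpu \
    --custom_config_base /path/to/local/configs
```
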