add ALICE profile
parent acada75ffe
commit b6ccb46383

5 changed files with 68 additions and 0 deletions
.github/workflows/main.yml (vendored): 1 addition

```diff
@@ -18,6 +18,7 @@ jobs:
       matrix:
         profile:
           - 'abims'
+          - 'alice'
           - 'aws_tower'
           - 'awsbatch'
           - 'bi'
```
README.md: 1 addition

```diff
@@ -91,6 +91,7 @@ See [`nf-core/configs/docs`](https://github.com/nf-core/configs/tree/master/docs
 Currently documentation is available for the following systems:
 
 * [ABIMS](docs/abims.md)
+* [ALICE](docs/alice.md)
 * [AWSBATCH](docs/awsbatch.md)
 * [AWS_TOWER](docs/aws_tower.md)
 * [BIGPURPLE](docs/bigpurple.md)
```
conf/alice.config: 39 additions (new file)

```nextflow
params {
    config_profile_name        = 'ALICE'
    config_profile_description = 'Profile for use on Academic Leiden Interdisciplinary Cluster Environment (ALICE).'
    config_profile_contact     = 'Bjorn Peare Bartholdy (@osteobjorn)'
    config_profile_url         = 'https://wiki.alice.universiteitleiden.nl/index.php?title=ALICE_User_Documentation_Wiki'
    max_cpus   = 24
    max_memory = 240.GB
    max_time   = 168.h
}

process {
    executor = 'slurm'
    // Route each task to a queue based on its requested walltime
    queue = { task.time < 3.h ? 'cpu-short' : task.time < 24.h ? 'cpu-medium' : 'cpu-long' }
}

singularity {
    enabled    = true
    autoMounts = true
}

// Perform work directory cleanup after a successful run
cleanup = true

// 'mem' selects the high-memory queue; 'debug' deactivates the automatic
// cleanup of the work directory after a successful run (overrides the
// cleanup option above).
profiles {
    mem {
        params {
            max_cpus   = 24
            max_memory = 2.TB
            max_time   = 336.h
        }
        process {
            queue = 'mem'
        }
    }
    debug {
        cleanup = false
    }
}
```
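The ternary `queue` closure above routes each task by its requested walltime: under 3 h to `cpu-short`, under 24 h to `cpu-medium`, and anything longer to `cpu-long`. One way to spot-check that routing after a run is standard SLURM accounting; `sacct` and the fields below are stock SLURM tooling, not part of this commit:

```bash
# List recent jobs together with the partition each one ran on, to confirm
# that tasks landed on cpu-short/cpu-medium/cpu-long as expected.
sacct -u "$USER" --format=JobName%40,Partition,Timelimit,State
```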
docs/alice.md: 26 additions (new file)

# nf-core/configs: Academic Leiden Interdisciplinary Cluster Environment (ALICE), Leiden University Configuration

> **NB:** You will need an [account](https://wiki.alice.universiteitleiden.nl/index.php?title=Getting_an_account) to use the HPC cluster to run the pipeline.

The profile is configured to run with Singularity version 3.6.1-Go-1.14, which needs to be loaded as a module.

Before running the pipeline you will need to load Singularity, Nextflow, and Java. You can do this by including the commands below in your SLURM/sbatch script:

```bash
## Load the Singularity, Nextflow, and Java environment modules
module load Singularity/3.6.1-Go-1.14
module load Nextflow/21.03.0
module load Java/11.0.2
```

All of the intermediate files required to run the pipeline are stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully, because it can get quite large; all of the main output files are saved in the `results/` directory anyway. The config therefore contains a `cleanup` option that removes the `work/` directory automatically once the pipeline has completed successfully. If the run does not complete successfully, the `work/` directory should be removed manually to save storage space.

This configuration automatically chooses the correct SLURM queue (`cpu-short`, `cpu-medium`, `cpu-long`) depending on the time required by each process. For processes with high memory requirements (>240 GB), e.g. when using MALT, use the `mem` profile:

```bash
nextflow run nf-core/eager -profile alice,mem
```

> **NB:** Nextflow needs to submit jobs to the HPC cluster via SLURM, so the commands above have to be run from one of the login nodes.
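Taken together, the module loads and the run command fit into a single sbatch wrapper. A minimal sketch, assuming nf-core/eager as the pipeline; the job name, partition, walltime, and memory values are placeholder assumptions, not part of the commit:

```bash
#!/bin/bash
#SBATCH --job-name=nf-core-head    # placeholder job name
#SBATCH --partition=cpu-medium     # the head job itself is lightweight
#SBATCH --time=24:00:00            # must outlive the entire pipeline run
#SBATCH --mem=4G

# Load the modules named in docs/alice.md
module load Singularity/3.6.1-Go-1.14
module load Nextflow/21.03.0
module load Java/11.0.2

# The 'alice' profile sets executor = 'slurm', so Nextflow submits each
# process as its own SLURM job; this script only hosts the head process.
nextflow run nf-core/eager -profile alice
```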
nfcore_custom.config: 1 addition

```diff
@@ -11,6 +11,7 @@
 //Please use a new line per include Config section to allow easier linting/parsing. Thank you.
 profiles {
   abims     { includeConfig "${params.custom_config_base}/conf/abims.config" }
+  alice     { includeConfig "${params.custom_config_base}/conf/alice.config" }
   aws_tower { includeConfig "${params.custom_config_base}/conf/aws_tower.config" }
   awsbatch  { includeConfig "${params.custom_config_base}/conf/awsbatch.config" }
   bi        { includeConfig "${params.custom_config_base}/conf/bi.config" }
```
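To exercise the new profile before a change like this is merged, nf-core pipelines can be pointed at a local checkout of the configs repository via the `--custom_config_base` parameter; the clone path below is a placeholder:

```bash
# Test the unmerged 'alice' profile from a local clone of nf-core/configs.
nextflow run nf-core/eager -profile alice \
    --custom_config_base /path/to/local/configs
```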