
Add HKI profile files

This commit is contained in:
James Fellows Yates 2022-09-14 15:34:27 +02:00
parent 8ea01e2041
commit c0effc0366
5 changed files with 131 additions and 0 deletions


@@ -62,6 +62,7 @@ jobs:
- "google"
- "hasta"
- "hebbe"
- "hki"
- "icr_davros"
- "ifb_core"
- "imperial"


@@ -118,6 +118,7 @@ Currently documentation is available for the following systems:
- [GOOGLE](docs/google.md)
- [HASTA](docs/hasta.md)
- [HEBBE](docs/hebbe.md)
- [HKI](docs/hki.md)
- [ICR_DAVROS](docs/icr_davros.md)
- [IMPERIAL](docs/imperial.md)
- [JAX](docs/jax.md)

conf/hki.config (new file, 104 lines added)

@@ -0,0 +1,104 @@
params {
    config_profile_description = 'HKI clusters profile provided by nf-core/configs.'
    config_profile_contact = 'James Fellows Yates (@jfy133)'
    config_profile_url = 'https://leibniz-hki.de'
}
profiles {
    apate {
        params {
            config_profile_description = 'apate HKI cluster profile provided by nf-core/configs.'
            config_profile_contact = 'James Fellows Yates (@jfy133)'
            config_profile_url = 'https://leibniz-hki.de'
            max_memory = 128.GB
            max_cpus = 32
            max_time = 1440.h
        }
        process {
            executor = 'local'
            maxRetries = 2
        }
        executor {
            queueSize = 8
        }
        singularity {
            enabled = true
            autoMounts = true
            cacheDir = '/Net/Groups/ccdata/apps/singularity'
        }
        conda {
            cacheDir = '/Net/Groups/ccdata/apps/conda_envs'
        }
        cleanup = true
    }
    aither {
        params {
            config_profile_description = 'aither HKI cluster profile provided by nf-core/configs.'
            config_profile_contact = 'James Fellows Yates (@jfy133)'
            config_profile_url = 'https://leibniz-hki.de'
            max_memory = 128.GB
            max_cpus = 32
            max_time = 1440.h
        }
        process {
            executor = 'local'
            maxRetries = 2
        }
        executor {
            queueSize = 8
        }
        singularity {
            enabled = true
            autoMounts = true
            cacheDir = '/Net/Groups/ccdata/apps/singularity'
        }
        conda {
            cacheDir = '/Net/Groups/ccdata/apps/conda_envs'
        }
        cleanup = true
    }
    arges {
        params {
            config_profile_description = 'arges HKI cluster profile provided by nf-core/configs.'
            config_profile_contact = 'James Fellows Yates (@jfy133)'
            config_profile_url = 'https://leibniz-hki.de'
            max_memory = 64.GB
            max_cpus = 12
            max_time = 1440.h
        }
        process {
            executor = 'local'
            maxRetries = 2
        }
        executor {
            queueSize = 8
        }
        singularity {
            enabled = true
            autoMounts = true
            cacheDir = '/Net/Groups/ccdata/apps/singularity'
        }
        conda {
            cacheDir = '/Net/Groups/ccdata/apps/conda_envs'
        }
        cleanup = true
    }
    debug {
        cleanup = false
    }
}
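
The nested structure above means a user always combines the institutional `hki` profile with one cluster sub-profile. To inspect the configuration Nextflow would actually apply before launching anything, the `nextflow config` command can print the merged result; a minimal sketch, where `nf-core/mag` is only an illustrative pipeline name:

```bash
# Print the resolved configuration for the apate cluster sub-profile.
# The pipeline name is a placeholder; substitute the pipeline you intend to run.
nextflow config nf-core/mag -profile hki,apate
```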

docs/hki.md (new file, 24 lines added)

@@ -0,0 +1,24 @@
# nf-core/configs: HKI Configuration
All nf-core pipelines have been successfully configured for use on clusters at the [Leibniz Institute for Natural Product Research and Infection Biology Hans Knöll Institute](https://www.leibniz-hki.de/en/).
To use, run the pipeline with `-profile hki,<cluster>`. This will download and launch the [`hki.config`](../conf/hki.config), which contains specific profiles for each cluster (see the example command after the profile list below). The number of jobs that run in parallel is currently limited to 8.
The profiles currently available are:
- apate (uses singularity, cleanup set to true by default)
- arges (uses singularity, cleanup set to true by default)
- aither (uses singularity, cleanup set to true by default)
- debug (sets cleanup to false for debugging purposes, use e.g. `-profile hki,<cluster>,debug`)
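
For example, a typical launch on the apate cluster might look like the sketch below; the pipeline name, input sheet, and output directory are placeholders and not part of this configuration:

```bash
# Example launch on the apate cluster (pipeline name and paths are placeholders).
nextflow run nf-core/mag -profile hki,apate --input samplesheet.csv --outdir results
```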
Note that Nextflow is not necessarily installed by default on the HKI HPC cluster(s). You will need to install it into a directory you have write access to.
Follow the instructions from the Nextflow documentation (a sketch is shown after the link below).
- Install Nextflow: [here](https://www.nextflow.io/docs/latest/getstarted.html#)
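
One common installation route from the linked documentation is roughly the following; `~/bin` is only an example of a user-writable directory on your `$PATH`:

```bash
# Download the Nextflow launcher script into the current directory.
curl -s https://get.nextflow.io | bash
# Make it executable and move it to a writable directory that is on your PATH (example location).
chmod +x nextflow
mkdir -p ~/bin
mv nextflow ~/bin/
```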
All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline
has finished successfully because it can get quite large, and all of the main output files will be saved in the `results/` directory anyway.
> NB: You will need an account to use the HKI HPC clusters in order to run the pipeline. If in doubt contact the ICT Service Desk.
> NB: Nextflow will need to submit the jobs via SLURM to the HKI HPC clusters and as such the commands above will have to be executed on the login
> node. If in doubt contact ICT.


@@ -44,6 +44,7 @@ profiles {
google { includeConfig "${params.custom_config_base}/conf/google.config" }
hasta { includeConfig "${params.custom_config_base}/conf/hasta.config" }
hebbe { includeConfig "${params.custom_config_base}/conf/hebbe.config" }
hki { includeConfig "${params.custom_config_base}/conf/hki.config" }
icr_davros { includeConfig "${params.custom_config_base}/conf/icr_davros.config" }
ifb_core { includeConfig "${params.custom_config_base}/conf/ifb_core.config" }
imperial { includeConfig "${params.custom_config_base}/conf/imperial.config" }