Mirror of https://github.com/MillironX/nf-configs.git, synced 2024-11-24 09:09:56 +00:00
commit 5ba1b01c0c
5 changed files with 51 additions and 1 deletion
2  .github/workflows/main.yml (vendored)
@@ -16,7 +16,7 @@ jobs:
     needs: test_all_profiles
     strategy:
       matrix:
-        profile: ['abims', 'awsbatch', 'bi','bigpurple', 'binac', 'cbe', 'ccga_dx', 'ccga_med', 'cfc', 'cfc_dev', 'crick', 'denbi_qbic', 'ebc', 'genotoul', 'genouest', 'gis', 'google', 'hebbe', 'icr_davros', 'imperial', 'imperial_mb', 'kraken', 'mpcdf', 'munin', 'pasteur', 'phoenix', 'prince', 'shh', 'uct_hpc', 'uppmax', 'utd_ganymede', 'uzh']
+        profile: ['abims', 'awsbatch', 'bi','bigpurple', 'binac', 'cbe', 'ccga_dx', 'ccga_med', 'cfc', 'cfc_dev', 'crick', 'denbi_qbic', 'ebc', 'genotoul', 'genouest', 'gis', 'google', 'hebbe', 'icr_davros', 'imperial', 'imperial_mb', 'kraken', 'mpcdf', 'munin', 'pasteur', 'phoenix', 'prince', 'seg_globe', 'shh', 'uct_hpc', 'uppmax', 'utd_ganymede', 'uzh']
     steps:
       - uses: actions/checkout@v1
       - name: Install Nextflow

README.md

@@ -119,6 +119,7 @@ Currently documentation is available for the following systems:
 * [PASTEUR](docs/pasteur.md)
 * [PHOENIX](docs/phoenix.md)
 * [PRINCE](docs/prince.md)
+* [SEG_GLOBE](docs/seg_globe.md)
 * [SHH](docs/shh.md)
 * [UCT_HPC](docs/uct_hpc.md)
 * [UPPMAX](docs/uppmax.md)

27  conf/seg_globe.config (new file)

@@ -0,0 +1,27 @@
+//Profile config names for nf-core/configs
+params {
+  config_profile_description = 'Section for Evolutionary Genomics @ GLOBE, University of Copenhagen - seg_globe profile provided by nf-core/configs.'
+  config_profile_contact = 'Aashild Vaagene (@ashildv)'
+  config_profile_url = 'https://globe.ku.dk/research/evogenomics/'
+  max_memory = 250.GB
+  max_cpus = 35
+  max_time = 720.h
+}
+
+singularity {
+  enabled = true
+  autoMounts = true
+  cacheDir = '/shared/volume/hologenomics/data/cache/nf-eager/singularity'
+}
+
+process {
+  executor = 'slurm'
+  queue = { task.time < 24.h ? 'hologenomics-short' : task.time < 168.h ? 'hologenomics' : 'hologenomics-long' }
+}
+
+cleanup = true
+
+executor {
+  queueSize = 8
+}
+
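
As a quick sanity check of the values hard-coded above, something like the following could be run once on the cluster before first use. The partition names and cache path are taken directly from the config; the exact `sinfo` output will depend on the local SLURM setup.

```bash
# Sketch only: confirm the partitions used by the dynamic 'queue' selector
# exist, and that the shared Singularity cache directory is accessible.
sinfo -p hologenomics-short,hologenomics,hologenomics-long
ls -ld /shared/volume/hologenomics/data/cache/nf-eager/singularity
```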

21  docs/seg_globe.md (new file)

@@ -0,0 +1,21 @@
+# nf-core/configs: Section for Evolutionary Genomics at GLOBE, University of Copenhagen (hologenomics partition on HPC) Configuration
+
+> **NB:** You will need an account to use the HPC cluster to run the pipeline. If in doubt contact IT.
+
+The profile is configured to run with Singularity version 3.6.3-1.el7, which is part of the OS installation and does not need to be loaded as a module.
+
+Before running the pipeline you will need to load Java, miniconda and Nextflow. You can do this by including the commands below in your SLURM/sbatch script:
+
+```bash
+## Load Java and Nextflow environment modules
+module purge
+module load lib
+module load java/v1.8.0_202-jdk miniconda nextflow/v20.07.1.5412
+```
+
+All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large, and all of the main output files will be saved in the `results/` directory anyway.
+The config contains a `cleanup` directive that removes the `work/` directory automatically once the pipeline has completed successfully. If the run does not complete successfully then the `work/` dir should be removed manually to save storage space.
+
+This configuration will automatically choose the correct SLURM queue (`hologenomics-short`, `hologenomics` or `hologenomics-long`) depending on the time requested by each process.
+
+> **NB:** Nextflow will need to submit the jobs via SLURM to the HPC cluster and as such the commands above will have to be submitted from one of the login nodes.
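
Putting the pieces of the documentation together, a submission wrapper might look roughly like the sketch below. The job name, resource requests and the choice of nf-core/eager are illustrative assumptions only (the cache path in the config hints at eager, but any nf-core pipeline is launched the same way with `-profile seg_globe`).

```bash
#!/bin/bash
#SBATCH --job-name=nf-head         # hypothetical head-job settings; adjust to your project
#SBATCH --partition=hologenomics
#SBATCH --time=48:00:00
#SBATCH --mem=4G

## Load Java, miniconda and Nextflow as described above
module purge
module load lib
module load java/v1.8.0_202-jdk miniconda nextflow/v20.07.1.5412

## Pipeline name and '-resume' are illustrative; the actual compute jobs are
## submitted to SLURM by Nextflow itself via the seg_globe profile.
nextflow run nf-core/eager -profile seg_globe -resume
```

Submitting this script with `sbatch` from a login node is consistent with the note above about where the commands have to be run.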

nfcore_custom.config

@@ -38,6 +38,7 @@ profiles {
   pasteur { includeConfig "${params.custom_config_base}/conf/pasteur.config" }
   phoenix { includeConfig "${params.custom_config_base}/conf/phoenix.config" }
   prince { includeConfig "${params.custom_config_base}/conf/prince.config" }
+  seg_globe { includeConfig "${params.custom_config_base}/conf/seg_globe.config"}
   shh { includeConfig "${params.custom_config_base}/conf/shh.config" }
   uct_hpc { includeConfig "${params.custom_config_base}/conf/uct_hpc.config" }
   uppmax { includeConfig "${params.custom_config_base}/conf/uppmax.config" }
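
Before this include line is available in the copy of nf-core/configs that a pipeline fetches at runtime, the new profile can be exercised from a local checkout by overriding `params.custom_config_base`, the same variable the include paths above are built from. The pipeline name and checkout path below are placeholders.

```bash
# Sketch: test the unmerged profile against a local clone of the configs repo
# (replace the path and pipeline with your own choices).
nextflow run nf-core/eager -profile seg_globe --custom_config_base /path/to/configs
```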